# -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2017
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup ([email protected]),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what might
# be considered good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
import re
import types
import sys
import os.path
import inspect
import base64
import warnings
__version__ = '3.10'
__tabversion__ = '3.10'
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = True # Debugging mode. If set, yacc generates a
# 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = False # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
# String type-checking compatibility
if sys.version_info[0] < 3:
string_types = basestring
else:
string_types = str
MAXINT = sys.maxsize
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
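#
# Illustrative usage sketch (not executed by this module): a PlyLogger
# wrapping a file-like object can be passed to yacc() through its errorlog
# and debuglog arguments to redirect diagnostic output:
#
#     import sys
#     from ply import yacc
#     parser = yacc.yacc(errorlog=yacc.PlyLogger(sys.stderr))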
class PlyLogger(object):
def __init__(self, f):
self.f = f
def debug(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
info = debug
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# Exception raised for yacc-related errors
class YaccError(Exception):
pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) > resultlimit:
repr_str = repr_str[:resultlimit] + ' ...'
result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) < 16:
return repr_str
else:
return '<%s @ 0x%x>' % (type(r).__name__, id(r))
# Panic mode error recovery support. This feature is being reworked--much of the
# code here exists to provide a deprecation warning and a backwards-compatible transition
_errok = None
_token = None
_restart = None
_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
Instead, invoke the methods on the associated parser instance:
def p_error(p):
...
# Use parser.errok(), parser.token(), parser.restart()
...
parser = yacc.yacc()
'''
def errok():
warnings.warn(_warnmsg)
return _errok()
def restart():
warnings.warn(_warnmsg)
return _restart()
def token():
warnings.warn(_warnmsg)
return _token()
# Utility function to call the p_error() function with some deprecation hacks
def call_errorfunc(errorfunc, token, parser):
global _errok, _token, _restart
_errok = parser.errok
_token = parser.token
_restart = parser.restart
r = errorfunc(token)
try:
del _errok, _token, _restart
except NameError:
pass
return r
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self):
return self.type
def __repr__(self):
return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually operate on the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
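#
# As an illustration, a hypothetical grammar rule showing how these accessors
# are typically used (p is the YaccProduction instance passed to the rule):
#
#     def p_expr_plus(p):
#         'expr : expr PLUS term'
#         p[0] = p[1] + p[3]                  # index access reads/writes .value
#         startline, endline = p.linespan(2)  # line range of the PLUS token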
class YaccProduction:
def __init__(self, s, stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser = None
def __getitem__(self, n):
if isinstance(n, slice):
return [s.value for s in self.slice[n]]
elif n >= 0:
return self.slice[n].value
else:
return self.stack[n].value
def __setitem__(self, n, v):
self.slice[n].value = v
def __getslice__(self, i, j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self, n):
return getattr(self.slice[n], 'lineno', 0)
def set_lineno(self, n, lineno):
self.slice[n].lineno = lineno
def linespan(self, n):
startline = getattr(self.slice[n], 'lineno', 0)
endline = getattr(self.slice[n], 'endlineno', startline)
return startline, endline
def lexpos(self, n):
return getattr(self.slice[n], 'lexpos', 0)
def lexspan(self, n):
startpos = getattr(self.slice[n], 'lexpos', 0)
endpos = getattr(self.slice[n], 'endlexpos', startpos)
return startpos, endpos
def error(self):
raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
def __init__(self, lrtab, errorf):
self.productions = lrtab.lr_productions
self.action = lrtab.lr_action
self.goto = lrtab.lr_goto
self.errorfunc = errorf
self.set_defaulted_states()
self.errorok = True
def errok(self):
self.errorok = True
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
# Defaulted state support.
# This method identifies parser states where there is only one possible reduction action.
# For such states, the parser can choose to make a rule reduction without consuming
# the next look-ahead token. This delayed invocation of the tokenizer can be useful in
# certain kinds of advanced parsing situations where the lexer and parser interact with
# each other or change states (i.e., manipulation of scope, lexer states, etc.).
#
# See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
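#
# As a concrete illustration (hypothetical table contents): if
# self.action[12] == {'PLUS': -6}, then state 12 has exactly one action and
# it is a reduction, so set_defaulted_states() records
# defaulted_states[12] = -6 and the parser reduces by rule 6 in state 12
# without fetching the next token.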
def set_defaulted_states(self):
self.defaulted_states = {}
for state, actions in self.action.items():
rules = list(actions.values())
if len(rules) == 1 and rules[0] < 0:
self.defaulted_states[state] = rules[0]
def disable_defaulted_states(self):
self.defaulted_states = {}
def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug, int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
elif tracking:
return self.parseopt(input, lexer, debug, tracking, tokenfunc)
else:
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
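# A typical invocation, assuming a previously constructed parser and lexer
# (the names below are illustrative):
#
#     result = parser.parse('3 + 4 * 5', lexer=mylexer, tracking=True)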
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging-enabled version of parse(). All changes made to the
# parsing engine should be made here. Optimized versions of this function
# are automatically created by the ply/ygen.py script. This script cuts out
# sections enclosed in markers such as this:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parsedebug-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
#--! DEBUG
debug.info('PLY: PARSE DEBUG START')
#--! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
#--! DEBUG
debug.debug('')
debug.debug('State : %s', state)
#--! DEBUG
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
#--! DEBUG
debug.debug('Defaulted state %s: Reduce using %d', state, -t)
#--! DEBUG
#--! DEBUG
debug.debug('Stack : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
#--! DEBUG
debug.debug('Action : Shift and goto state %s', t)
#--! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
#--! DEBUG
if plen:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
'['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
goto[statestack[-1-plen]][pname])
else:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
goto[statestack[-1]][pname])
#--! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
#--! DEBUG
debug.info('Done : Returning %s', format_result(result))
debug.info('PLY: PARSE DEBUG END')
#--! DEBUG
return result
if t is None:
#--! DEBUG
debug.error('Error : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user-defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parsedebug-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
# This code is automatically generated by the ply/ygen.py script. Make
# changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user-defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
# by the ply/ygen.py script. Make changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-notrack-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user-defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-notrack-end
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are also defined:
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
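# For example (illustrative values), the rule 'expr : expr PLUS term' produces
# a Production whose main attributes look like this:
#
#     name  = 'expr'
#     prod  = ('expr', 'PLUS', 'term')
#     len   = 3
#     usyms = ['expr', 'PLUS', 'term']
#     str   = 'expr -> expr PLUS term'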
class Production(object):
reduced = 0
def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = []
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
self.str = '%s -> <empty>' % self.name
def __str__(self):
return self.str
def __repr__(self):
return 'Production(' + str(self) + ')'
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self, index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self, n):
if n > len(self.prod):
return None
p = LRItem(self, n)
# Precompute the list of productions immediately following.
try:
p.lr_after = Prodnames[p.prod[n+1]]
except (IndexError, KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# This class serves as a minimal stand-in for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
def __init__(self, str, name, len, func, file, line):
self.name = name
self.len = len
self.func = func
self.callable = None
self.file = file
self.line = line
self.str = str
def __str__(self):
return self.str
def __repr__(self):
return 'MiniProduction(%s)' % self.str
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# are the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next - Next LR item. For example, if we are at 'expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the item's prod list (including the '.' marker)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
def __init__(self, p, n):
self.name = p.name
self.prod = list(p.prod)
self.number = p.number
self.lr_index = n
self.lookaheads = {}
self.prod.insert(n, '.')
self.prod = tuple(self.prod)
self.len = len(self.prod)
self.usyms = p.usyms
def __str__(self):
if self.prod:
s = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
s = '%s -> <empty>' % self.name
return s
def __repr__(self):
return 'LRItem(' + str(self) + ')'
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
i = len(symbols) - 1
while i >= 0:
if symbols[i] in terminals:
return symbols[i]
i -= 1
return None
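# For example, assuming terminals contains 'PLUS' and 'NUMBER' but not the
# nonterminals 'expr' and 'term':
#
#     rightmost_terminal(['expr', 'PLUS', 'term'], terminals)   # -> 'PLUS'
#     rightmost_terminal(['expr', 'term'], terminals)           # -> None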
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
pass
class Grammar(object):
def __init__(self, terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = {} # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = {} # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = {} # A dictionary of precomputed FIRST(x) symbols
self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self, index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
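# For example, to declare PLUS and MINUS as left-associative at level 1 and
# TIMES as left-associative at the higher level 2 (token names illustrative):
#
#     g.set_precedence('PLUS', 'left', 1)
#     g.set_precedence('MINUS', 'left', 1)
#     g.set_precedence('TIMES', 'left', 2)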
def set_precedence(self, term, assoc, level):
assert self.Productions == [None], 'Must call set_precedence() before add_production()'
if term in self.Precedence:
raise GrammarError('Precedence already specified for terminal %r' % term)
if assoc not in ['left', 'right', 'nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc, level)
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the rightmost terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
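# An illustrative sketch (not executed here) of registering rules on a
# Grammar instance g:
#
#     g = Grammar(['PLUS', 'NUMBER'])
#     g.add_production('expr', ['expr', 'PLUS', 'term'])
#     g.add_production('term', ['NUMBER'])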
def add_production(self, prodname, syms, func=None, file='', line=0):
if prodname in self.Terminals:
raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
if prodname == 'error':
raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
if not _is_identifier.match(prodname):
raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
# Look for literal tokens
for n, s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
(file, line, s, prodname))
if c not in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
if syms[-2] != '%prec':
raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
(file, line))
precname = syms[-1]
prodprec = self.Precedence.get(precname)
if not prodprec:
raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
else:
self.UsedPrecedence.add(precname)
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms, self.Terminals)
prodprec = self.Precedence.get(precname, ('right', 0))
# See if the rule is already in the rulemap
map = '%s -> %s' % (prodname, syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
'Previous definition at %s:%d' % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if prodname not in self.Nonterminals:
self.Nonterminals[prodname] = []
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if t not in self.Nonterminals:
self.Nonterminals[t] = []
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber, prodname, syms, prodprec, func, file, line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [p]
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self, start=None):
if not start:
start = self.Productions[1].name
if start not in self.Nonterminals:
raise GrammarError('start symbol %s undefined' % start)
self.Productions[0] = Production(0, "S'", [start])
self.Nonterminals[start].append(0)
self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if s in reachable:
return
reachable.add(s)
for p in self.Prodnames.get(s, []):
for r in p.prod:
mark_reachable_from(r)
reachable = set()
mark_reachable_from(self.Productions[0].prod[0])
return [s for s in self.Nonterminals if s not in reachable]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
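# For example, a nonterminal defined only by the hypothetical rule
#
#     a : PLUS a
#
# can never derive a string consisting solely of terminals, so
# infinite_cycles() would report 'a'.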
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = True
terminates['$end'] = True
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = False
# Then propagate termination until no change:
while True:
some_change = False
for (n, pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = False
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = True
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = True
some_change = True
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s, term) in terminates.items():
if not term:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used in the grammar, but not defined as tokens or
# grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p:
continue
for s in p.prod:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
result.append((s, p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of the unused terminal names.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s, v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s, v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname, self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first(), the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self, beta):
# We are computing First(x1,x2,x3,...,xn)
result = []
for x in beta:
x_produces_empty = False
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = True
else:
if f not in result:
result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
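# For example, for the small grammar
#
#     expr : expr PLUS term
#     expr : term
#     term : NUMBER
#
# FIRST(expr) = FIRST(term) = ['NUMBER'], since any derivation of either
# nonterminal must begin with a NUMBER token.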
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while True:
some_change = False
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append(f)
some_change = True
if not some_change:
break
return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
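# For the small example grammar shown above compute_first(), with expr as the
# start symbol, FOLLOW(expr) = ['$end', 'PLUS'] and
# FOLLOW(term) = ['$end', 'PLUS'].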
def compute_follow(self, start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = []
if not start:
start = self.Productions[1].name
self.Follow[start] = ['$end']
while True:
didadd = False
for p in self.Productions[1:]:
# Here is the production set
for i, B in enumerate(p.prod):
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = False
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if f == '<empty>':
hasempty = True
if hasempty or i == (len(p.prod)-1):
# Add elements of Follow(p.name) to Follow(B)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if not didadd:
break
return self.Follow
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
for p in self.Productions:
lastlri = p
i = 0
lr_items = []
while True:
if i > len(p):
lri = None
else:
lri = LRItem(p, i)
# Precompute the list of productions immediately following
try:
lri.lr_after = self.Prodnames[lri.prod[i+1]]
except (IndexError, KeyError):
lri.lr_after = []
try:
lri.lr_before = lri.prod[i-1]
except IndexError:
lri.lr_before = None
lastlri.lr_next = lri
if not lri:
break
lr_items.append(lri)
lastlri = lri
i += 1
p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
pass
class LRTable(object):
def __init__(self):
self.lr_action = None
self.lr_goto = None
self.lr_productions = None
self.lr_method = None
def read_table(self, module):
if isinstance(module, types.ModuleType):
parsetab = module
else:
exec('import %s' % module)
parsetab = sys.modules[module]
if parsetab._tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_action = parsetab._lr_action
self.lr_goto = parsetab._lr_goto
self.lr_productions = []
for p in parsetab._lr_productions:
self.lr_productions.append(MiniProduction(*p))
self.lr_method = parsetab._lr_method
return parsetab._lr_signature
def read_pickle(self, filename):
try:
import cPickle as pickle
except ImportError:
import pickle
if not os.path.exists(filename):
raise ImportError
in_f = open(filename, 'rb')
tabversion = pickle.load(in_f)
if tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_method = pickle.load(in_f)
signature = pickle.load(in_f)
self.lr_action = pickle.load(in_f)
self.lr_goto = pickle.load(in_f)
productions = pickle.load(in_f)
self.lr_productions = []
for p in productions:
self.lr_productions.append(MiniProduction(*p))
in_f.close()
return signature
# Bind all production function names to callable objects in pdict
def bind_callables(self, pdict):
for p in self.lr_productions:
p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
N = {}
for x in X:
N[x] = 0
stack = []
F = {}
for x in X:
if N[x] == 0:
traverse(x, N, stack, F, X, R, FP)
return F
def traverse(x, N, stack, F, X, R, FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y, N, stack, F, X, R, FP)
N[x] = min(N[x], N[y])
for a in F.get(y, []):
if a not in F[x]:
F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
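# -----------------------------------------------------------------------------
# Illustrative sketch of digraph() on a toy input (assumed values, shown only
# to make the algorithm concrete):
#
#     X  = ['a', 'b', 'c']
#     R  = lambda x: {'a': ['b'], 'b': ['c'], 'c': []}[x]
#     FP = lambda x: {'a': [1], 'b': [2], 'c': [3]}[x]
#
#     digraph(X, R, FP)   ==>   {'a': [1, 2, 3], 'b': [2, 3], 'c': [3]}
#
# That is, each F(x) is F'(x) unioned with F(y) for every y related to x.
# -----------------------------------------------------------------------------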
class LALRError(YaccError):
pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self, grammar, method='LALR', log=None):
if method not in ['SLR', 'LALR']:
raise LALRError('Unsupported method %s' % method)
self.grammar = grammar
self.lr_method = method
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
        # Diagnostic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self, I):
self._add_count += 1
# Add everything in I to J
J = I[:]
didadd = True
while didadd:
didadd = False
for j in J:
for x in j.lr_after:
if getattr(x, 'lr0_added', 0) == self._add_count:
continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = self._add_count
didadd = True
return J
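    # Illustrative example (comments only): for the grammar
    #
    #       S' -> S
    #       S  -> ( S )
    #       S  -> ( )
    #
    # the closure of the single item [S' -> . S] is the item set
    #
    #       [S' -> . S,  S -> . ( S ),  S -> . ( )]
    #
    # because the dot in front of the non-terminal S pulls in every
    # production for S.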
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self, I, x):
# First we look for a previously cached entry
g = self.lr_goto_cache.get((id(I), x))
if g:
return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = self.lr_goto_cache.get(x)
if not s:
s = {}
self.lr_goto_cache[x] = s
gs = []
for p in I:
n = p.lr_next
if n and n.lr_before == x:
s1 = s.get(id(n))
if not s1:
s1 = {}
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end')
if not g:
if gs:
g = self.lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
self.lr_goto_cache[(id(I), x)] = g
return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
        # Loop over the items in C and each grammar symbol
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = {}
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I, x)
if not g or id(g) in self.lr0_cidhash:
continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
    # The method used here is due to DeRemer and Pennello (1982).
    #
    # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = set()
num_nullable = 0
while True:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable.add(p.name)
continue
for t in p.prod:
if t not in nullable:
break
else:
nullable.add(p.name)
if len(nullable) == num_nullable:
break
num_nullable = len(nullable)
return nullable
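    # Illustrative example (comments only): given the productions
    #
    #       A -> <empty>
    #       B -> A A
    #       C -> A c
    #
    # the computed set is {'A', 'B'}.  A is directly empty, B derives empty
    # through two nullable symbols, and C is blocked by the terminal c.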
# -----------------------------------------------------------------------------
    # find_nonterminal_transitions(C)
    #
    # Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
trans = []
for stateno, state in enumerate(C):
for p in state:
if p.lr_index < p.len - 1:
t = (stateno, p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans:
trans.append(t)
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
dr_set = {}
state, N = trans
terms = []
g = self.lr0_goto(C[state], N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms:
terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state], N)
j = self.lr0_cidhash.get(id(g), -1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j, a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self, C, trans, nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state, N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N:
continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if (j, t) in dtrans:
# Yes. Okay, there is some chance that this is an includes relation
# the only way to know for certain is whether the rest of the
# production derives empty
li = lr_index + 1
while li < p.len:
if p.prod[li] in self.grammar.Terminals:
break # No forget it
if p.prod[li] not in nullable:
break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j, t))
g = self.lr0_goto(C[j], t) # Go to next set
j = self.lr0_cidhash.get(id(g), -1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name:
continue
if r.len != p.len:
continue
i = 0
                    # This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]:
break
i = i + 1
else:
lookb.append((j, r))
for i in includes:
if i not in includedict:
includedict[i] = []
includedict[i].append((state, N))
lookdict[(state, N)] = lookb
return lookdict, includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
    #             nullable = Set of nullable non-terminals
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
FP = lambda x: self.dr_relation(C, x, nullable)
R = lambda x: self.reads_relation(C, x, nullable)
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x, [])
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
for trans, lb in lookbacks.items():
# Loop over productions in lookback
for state, p in lb:
if state not in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans, [])
for a in f:
if a not in p.lookaheads[state]:
p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C, trans, nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C, trans, nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans, readsets, included)
# Add all of the lookaheads
self.add_lookaheads(lookd, followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
Productions = self.grammar.Productions
Precedence = self.grammar.Precedence
goto = self.lr_goto # Goto array
action = self.lr_action # Action array
log = self.log # Logger for output
actionp = {} # Action production array (temporary)
log.info('Parsing method: %s', self.lr_method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = self.lr0_items()
if self.lr_method == 'LALR':
self.add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [] # List of actions
st_action = {}
st_actionp = {}
st_goto = {}
log.info('')
log.info('state %d', st)
log.info('')
for p in I:
log.info(' (%d) %s', p.number, p)
log.info('')
for p in I:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action['$end'] = 0
st_actionp['$end'] = p
else:
# We are at the end of a production. Reduce!
if self.lr_method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = self.grammar.Follow[p.name]
for a in laheads:
actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
r = st_action.get(a)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
# Shift precedence comes from the token
sprec, slevel = Precedence.get(a, ('right', 0))
# Reduce precedence comes from rule being reduced (p)
rprec, rlevel = Productions[p.number].prec
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
Productions[p.number].reduced += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
chosenp, rejectp = pp, oldp
Productions[p.number].reduced += 1
Productions[oldp.number].reduced -= 1
else:
chosenp, rejectp = oldp, pp
self.rr_conflicts.append((st, chosenp, rejectp))
log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
a, st_actionp[a].number, st_actionp[a])
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
Productions[p.number].reduced += 1
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if a in self.grammar.Terminals:
g = self.lr0_goto(I, a)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
# We are in a shift state
actlist.append((a, p, 'shift and go to state %d' % j))
r = st_action.get(a)
if r is not None:
# Whoa have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
raise LALRError('Shift/shift conflict in state %d' % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
# Shift precedence comes from the token
sprec, slevel = Precedence.get(a, ('right', 0))
# Reduce precedence comes from the rule that could have been reduced
rprec, rlevel = Productions[st_actionp[a].number].prec
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
st_action[a] = j
st_actionp[a] = p
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = j
st_actionp[a] = p
# Print the actions associated with each terminal
_actprint = {}
for a, p, m in actlist:
if a in st_action:
if p is st_actionp[a]:
log.info(' %-15s %s', a, m)
_actprint[(a, m)] = 1
log.info('')
# Print the actions that were not used. (debugging)
not_used = 0
for a, p, m in actlist:
if a in st_action:
if p is not st_actionp[a]:
if not (a, m) in _actprint:
log.debug(' ! %-15s [ %s ]', a, m)
not_used = 1
_actprint[(a, m)] = 1
if not_used:
log.debug('')
# Construct the goto table for this state
nkeys = {}
for ii in I:
for s in ii.usyms:
if s in self.grammar.Nonterminals:
nkeys[s] = None
for n in nkeys:
g = self.lr0_goto(I, n)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
st_goto[n] = j
log.info(' %-30s shift and go to state %d', n, j)
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
# -----------------------------------------------------------------------------
    # write_table()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self, tabmodule, outputdir='', signature=''):
if isinstance(tabmodule, types.ModuleType):
raise IOError("Won't overwrite existing tabmodule")
basemodulename = tabmodule.split('.')[-1]
filename = os.path.join(outputdir, basemodulename) + '.py'
try:
f = open(filename, 'w')
f.write('''
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = {}
for s, nd in self.lr_action.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_action_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
''')
else:
f.write('\n_lr_action = { ')
for k, v in self.lr_action.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
if smaller:
# Factor out names to try and make smaller
items = {}
for s, nd in self.lr_goto.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_goto_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
''')
else:
f.write('\n_lr_goto = { ')
for k, v in self.lr_goto.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
# Write production table
f.write('_lr_productions = [\n')
for p in self.lr_productions:
if p.func:
f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
p.func, os.path.basename(p.file), p.line))
else:
f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
f.write(']\n')
f.close()
except IOError as e:
raise
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=''):
try:
import cPickle as pickle
except ImportError:
import pickle
with open(filename, 'wb') as outf:
pickle.dump(__tabversion__, outf, pickle_protocol)
pickle.dump(self.lr_method, outf, pickle_protocol)
pickle.dump(signature, outf, pickle_protocol)
pickle.dump(self.lr_action, outf, pickle_protocol)
pickle.dump(self.lr_goto, outf, pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line))
else:
outp.append((str(p), p.name, p.len, None, None, None))
pickle.dump(outp, outf, pickle_protocol)
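    # Illustrative round-trip sketch (comments only; 'parser.pickle' is an
    # assumed filename):
    #
    #     lr.pickle_table('parser.pickle', signature)
    #     ...
    #     lr2 = LRTable()
    #     sig = lr2.read_pickle('parser.pickle')     # sig == signature
    #
    # read_pickle() raises VersionError when the stored table version does
    # not match __tabversion__.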
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
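# Illustrative behavior (comments only): yacc() calls get_caller_module_dict(2).
# Frame 0 is this function, frame 1 is yacc() itself, and frame 2 is whoever
# called yacc(), so the returned dictionary contains that caller's globals,
# updated with its locals when yacc() was invoked inside a function or method.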
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
grammar = []
# Split the doc string into lines
pstrings = doc.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p:
continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
prodname = lastp
syms = p[1:]
else:
prodname = p[0]
lastp = prodname
syms = p[2:]
assign = p[1]
if assign != ':' and assign != '::=':
raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
grammar.append((file, dline, prodname, syms))
except SyntaxError:
raise
except Exception:
raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))
return grammar
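# -----------------------------------------------------------------------------
# Illustrative example (comments only): for the docstring
#
#     expression : expression PLUS term
#                | term
#
# parse_grammar(doc, 'calc.py', 10) returns
#
#     [('calc.py', 11, 'expression', ['expression', 'PLUS', 'term']),
#      ('calc.py', 12, 'expression', ['term'])]
#
# where line numbers count forward from the line on which the docstring begins.
# -----------------------------------------------------------------------------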
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
def __init__(self, pdict, log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.modules = set()
self.grammar = []
self.error = False
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_modules()
return self.error
# Compute a signature over the grammar
def signature(self):
parts = []
try:
if self.start:
parts.append(self.start)
if self.prec:
parts.append(''.join([''.join(p) for p in self.prec]))
if self.tokens:
parts.append(' '.join(self.tokens))
for f in self.pfuncs:
if f[3]:
parts.append(f[3])
except (TypeError, ValueError):
pass
return ''.join(parts)
# -----------------------------------------------------------------------------
# validate_modules()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_modules(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for module in self.modules:
try:
lines, linen = inspect.getsourcelines(module)
except IOError:
continue
counthash = {}
for linen, line in enumerate(lines):
linen += 1
m = fre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
filename, linen, name, prev)
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
if self.start is not None:
if not isinstance(self.start, string_types):
self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
self.error_func = self.pdict.get('p_error')
# Validate the error function
def validate_error_func(self):
if self.error_func:
if isinstance(self.error_func, types.FunctionType):
ismethod = 0
elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
self.log.error("'p_error' defined, but is not a function or method")
self.error = True
return
eline = self.error_func.__code__.co_firstlineno
efile = self.error_func.__code__.co_filename
module = inspect.getmodule(self.error_func)
self.modules.add(module)
argcount = self.error_func.__code__.co_argcount - ismethod
if argcount != 1:
self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
self.error = True
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get('tokens')
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = True
return
terminals = set()
for n in self.tokens:
if n in terminals:
self.log.warning('Token %r multiply defined', n)
terminals.add(n)
# Get the precedence map (if any)
def get_precedence(self):
self.prec = self.pdict.get('precedence')
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec, (list, tuple)):
self.log.error('precedence must be a list or tuple')
self.error = True
return
for level, p in enumerate(self.prec):
if not isinstance(p, (list, tuple)):
self.log.error('Bad precedence table')
self.error = True
return
if len(p) < 2:
self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
self.error = True
return
assoc = p[0]
if not isinstance(assoc, string_types):
self.log.error('precedence associativity must be a string')
self.error = True
return
for term in p[1:]:
if not isinstance(term, string_types):
self.log.error('precedence items must be strings')
self.error = True
return
preclist.append((term, assoc, level+1))
self.preclist = preclist
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if not name.startswith('p_') or name == 'p_error':
continue
if isinstance(item, (types.FunctionType, types.MethodType)):
line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
module = inspect.getmodule(item)
p_functions.append((line, module, name, item.__doc__))
# Sort all of the actions by line number; make sure to stringify
# modules to make them sortable, since `line` may not uniquely sort all
# p functions
p_functions.sort(key=lambda p_function: (
p_function[0],
str(p_function[1]),
p_function[2],
p_function[3]))
self.pfuncs = p_functions
# Validate all of the p_functions
def validate_pfunctions(self):
grammar = []
# Check for non-empty symbols
if len(self.pfuncs) == 0:
self.log.error('no rules of the form p_rulename are defined')
self.error = True
return
for line, module, name, doc in self.pfuncs:
file = inspect.getsourcefile(module)
func = self.pdict[name]
if isinstance(func, types.MethodType):
reqargs = 2
else:
reqargs = 1
if func.__code__.co_argcount > reqargs:
self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
self.error = True
elif func.__code__.co_argcount < reqargs:
self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
self.error = True
elif not func.__doc__:
self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
file, line, func.__name__)
else:
try:
parsed_g = parse_grammar(doc, file, line)
for g in parsed_g:
grammar.append((name, g))
except SyntaxError as e:
self.log.error(str(e))
self.error = True
# Looks like a valid grammar rule
# Mark the file in which defined.
self.modules.add(module)
# Secondary validation step that looks for p_ definitions that are not functions
# or functions that look like they might be grammar rules.
for n, v in self.pdict.items():
if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
continue
if n.startswith('t_'):
continue
if n.startswith('p_') and n != 'p_error':
self.log.warning('%r not defined as a function', n)
if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
(isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
if v.__doc__:
try:
doc = v.__doc__.split(' ')
if doc[1] == ':':
self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
v.__code__.co_filename, v.__code__.co_firstlineno, n)
except IndexError:
pass
self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
outputdir=None, debuglog=None, errorlog=None, picklefile=None):
if tabmodule is None:
tabmodule = tab_module
# Reference to the parsing method of the last built parser
global parse
# If pickling is enabled, table files are not created
if picklefile:
write_tables = 0
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
# Get the module dictionary used for the parser
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
pdict = dict(_items)
# If no __file__ attribute is available, try to obtain it from the __module__ instead
if '__file__' not in pdict:
pdict['__file__'] = sys.modules[pdict['__module__']].__file__
else:
pdict = get_caller_module_dict(2)
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If tabmodule specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(tabmodule, types.ModuleType):
srcfile = tabmodule.__file__
else:
if '.' not in tabmodule:
srcfile = pdict['__file__']
else:
parts = tabmodule.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
    # Determine whether the module providing the grammar is part of a package.
# If so, fix the tabmodule setting so that tables load correctly
pkg = pdict.get('__package__')
if pkg and isinstance(tabmodule, str):
if '.' not in tabmodule:
tabmodule = pkg + '.' + tabmodule
# Set start symbol if it's specified directly using an argument
if start is not None:
pdict['start'] = start
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict, log=errorlog)
pinfo.get_all()
if pinfo.error:
raise YaccError('Unable to build parser')
# Check signature against table files (if any)
signature = pinfo.signature()
# Read the tables
try:
lr = LRTable()
if picklefile:
read_signature = lr.read_pickle(picklefile)
else:
read_signature = lr.read_table(tabmodule)
if optimize or (read_signature == signature):
try:
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
except Exception as e:
errorlog.warning('There was a problem loading the table file: %r', e)
except VersionError as e:
errorlog.warning(str(e))
except ImportError:
pass
if debuglog is None:
if debug:
try:
debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
except IOError as e:
errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
debuglog = NullLogger()
else:
debuglog = NullLogger()
debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)
errors = False
# Validate the parser information
if pinfo.validate_all():
raise YaccError('Unable to build parser')
if not pinfo.error_func:
errorlog.warning('no p_error() function is defined')
# Create a grammar object
grammar = Grammar(pinfo.tokens)
# Set precedence level for terminals
for term, assoc, level in pinfo.preclist:
try:
grammar.set_precedence(term, assoc, level)
except GrammarError as e:
errorlog.warning('%s', e)
# Add productions to the grammar
for funcname, gram in pinfo.grammar:
file, line, prodname, syms = gram
try:
grammar.add_production(prodname, syms, funcname, file, line)
except GrammarError as e:
errorlog.error('%s', e)
errors = True
# Set the grammar start symbols
try:
if start is None:
grammar.set_start(pinfo.start)
else:
grammar.set_start(start)
except GrammarError as e:
errorlog.error(str(e))
errors = True
if errors:
raise YaccError('Unable to build parser')
# Verify the grammar structure
undefined_symbols = grammar.undefined_symbols()
for sym, prod in undefined_symbols:
errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
errors = True
unused_terminals = grammar.unused_terminals()
if unused_terminals:
debuglog.info('')
debuglog.info('Unused terminals:')
debuglog.info('')
for term in unused_terminals:
errorlog.warning('Token %r defined, but not used', term)
debuglog.info(' %s', term)
# Print out all productions to the debug log
if debug:
debuglog.info('')
debuglog.info('Grammar')
debuglog.info('')
for n, p in enumerate(grammar.Productions):
debuglog.info('Rule %-5d %s', n, p)
# Find unused non-terminals
unused_rules = grammar.unused_rules()
for prod in unused_rules:
errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
if len(unused_terminals) == 1:
errorlog.warning('There is 1 unused token')
if len(unused_terminals) > 1:
errorlog.warning('There are %d unused tokens', len(unused_terminals))
if len(unused_rules) == 1:
errorlog.warning('There is 1 unused rule')
if len(unused_rules) > 1:
errorlog.warning('There are %d unused rules', len(unused_rules))
if debug:
debuglog.info('')
debuglog.info('Terminals, with rules where they appear')
debuglog.info('')
terms = list(grammar.Terminals)
terms.sort()
for term in terms:
debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
debuglog.info('')
debuglog.info('Nonterminals, with rules where they appear')
debuglog.info('')
nonterms = list(grammar.Nonterminals)
nonterms.sort()
for nonterm in nonterms:
debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
debuglog.info('')
if check_recursion:
unreachable = grammar.find_unreachable()
for u in unreachable:
errorlog.warning('Symbol %r is unreachable', u)
infinite = grammar.infinite_cycles()
for inf in infinite:
errorlog.error('Infinite recursion detected for symbol %r', inf)
errors = True
unused_prec = grammar.unused_precedence()
for term, assoc in unused_prec:
errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
errors = True
if errors:
raise YaccError('Unable to build parser')
# Run the LRGeneratedTable on the grammar
if debug:
errorlog.debug('Generating %s tables', method)
lr = LRGeneratedTable(grammar, method, debuglog)
if debug:
num_sr = len(lr.sr_conflicts)
# Report shift/reduce and reduce/reduce conflicts
if num_sr == 1:
errorlog.warning('1 shift/reduce conflict')
elif num_sr > 1:
errorlog.warning('%d shift/reduce conflicts', num_sr)
num_rr = len(lr.rr_conflicts)
if num_rr == 1:
errorlog.warning('1 reduce/reduce conflict')
elif num_rr > 1:
errorlog.warning('%d reduce/reduce conflicts', num_rr)
# Write out conflicts to the output file
if debug and (lr.sr_conflicts or lr.rr_conflicts):
debuglog.warning('')
debuglog.warning('Conflicts:')
debuglog.warning('')
for state, tok, resolution in lr.sr_conflicts:
debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
already_reported = set()
for state, rule, rejected in lr.rr_conflicts:
if (state, id(rule), id(rejected)) in already_reported:
continue
debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
debuglog.warning('rejected rule (%s) in state %d', rejected, state)
errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
errorlog.warning('rejected rule (%s) in state %d', rejected, state)
already_reported.add((state, id(rule), id(rejected)))
warned_never = []
for state, rule, rejected in lr.rr_conflicts:
if not rejected.reduced and (rejected not in warned_never):
debuglog.warning('Rule (%s) is never reduced', rejected)
errorlog.warning('Rule (%s) is never reduced', rejected)
warned_never.append(rejected)
# Write the table file if requested
if write_tables:
try:
lr.write_table(tabmodule, outputdir, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
# Write a pickled version of the tables
if picklefile:
try:
lr.pickle_table(picklefile, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (picklefile, e))
# Build the parser
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
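# -----------------------------------------------------------------------------
# Minimal usage sketch (comments only; 'calclex' is a hypothetical ply.lex
# module that supplies the token list):
#
#     import ply.yacc as yacc
#     from calclex import tokens
#
#     def p_expression_plus(p):
#         'expression : expression PLUS term'
#         p[0] = p[1] + p[3]
#
#     def p_expression_term(p):
#         'expression : term'
#         p[0] = p[1]
#
#     def p_term_number(p):
#         'term : NUMBER'
#         p[0] = p[1]
#
#     def p_error(p):
#         print('Syntax error at', p)
#
#     parser = yacc.yacc()
#     result = parser.parse('1 + 2')
# -----------------------------------------------------------------------------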
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Note: This file includes code derived from pywcsgrid2
#
# This file contains Matplotlib transformation objects (e.g. from pixel to world
# coordinates, but also world-to-world).
import abc
import numpy as np
from matplotlib.path import Path
from matplotlib.transforms import Transform
from ... import units as u
from ...wcs import WCS
from ...wcs.utils import wcs_to_celestial_frame
from ...coordinates import (SkyCoord, frame_transform_graph,
SphericalRepresentation,
UnitSphericalRepresentation,
BaseCoordinateFrame)
class CurvedTransform(Transform, metaclass=abc.ABCMeta):
"""
Abstract base class for non-affine curved transforms
"""
input_dims = 2
output_dims = 2
is_separable = False
def transform_path(self, path):
"""
Transform a Matplotlib Path
Parameters
----------
path : :class:`~matplotlib.path.Path`
The path to transform
Returns
-------
path : :class:`~matplotlib.path.Path`
The resulting path
"""
return Path(self.transform(path.vertices), path.codes)
transform_path_non_affine = transform_path
@abc.abstractmethod
def transform(self, input):
raise NotImplementedError("")
@abc.abstractmethod
def inverted(self):
raise NotImplementedError("")
class WCSWorld2PixelTransform(CurvedTransform):
"""
WCS transformation from world to pixel coordinates
"""
has_inverse = True
def __init__(self, wcs, slice=None):
super().__init__()
self.wcs = wcs
if self.wcs.wcs.naxis > 2:
if slice is None:
raise ValueError("WCS has more than 2 dimensions, so ``slice`` should be set")
elif len(slice) != self.wcs.wcs.naxis:
raise ValueError("slice should have as many elements as WCS "
"has dimensions (should be {0})".format(self.wcs.wcs.naxis))
else:
self.slice = slice
self.x_index = slice.index('x')
self.y_index = slice.index('y')
else:
self.slice = None
def __eq__(self, other):
return (isinstance(other, type(self)) and self.wcs == other.wcs
and self.slice == other.slice)
@property
def input_dims(self):
return self.wcs.wcs.naxis
def transform(self, world):
"""
        Transform world to pixel coordinates. You should pass in an NxM array
        where N is the number of points to transform and M is the number of
        dimensions in the WCS. This then returns the (x, y) pixel coordinates
        as an Nx2 array.
"""
if world.shape[1] != self.wcs.wcs.naxis:
raise ValueError("Second dimension of input values should match number of WCS coordinates")
if world.shape[0] == 0:
pixel = np.zeros((0, 2))
else:
pixel = self.wcs.wcs_world2pix(world, 1) - 1
if self.slice is None:
return pixel
else:
return pixel[:, (self.x_index, self.y_index)]
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return WCSPixel2WorldTransform(self.wcs, slice=self.slice)
class WCSPixel2WorldTransform(CurvedTransform):
"""
WCS transformation from pixel to world coordinates
"""
has_inverse = True
def __init__(self, wcs, slice=None):
super().__init__()
self.wcs = wcs
self.slice = slice
if self.slice is not None:
self.x_index = slice.index('x')
self.y_index = slice.index('y')
def __eq__(self, other):
return (isinstance(other, type(self)) and self.wcs == other.wcs
and self.slice == other.slice)
@property
def output_dims(self):
return self.wcs.wcs.naxis
def get_coord_slices(self, xmin, xmax, ymin, ymax, nx, ny):
"""
Get a coordinate slice
"""
x = np.linspace(xmin, xmax, nx)
y = np.linspace(ymin, ymax, ny)
Y, X = np.meshgrid(y, x)
pixel = np.array([X.ravel(), Y.ravel()]).transpose()
world = self.transform(pixel)
return X, Y, [world[:, i].reshape(nx, ny).transpose() for i in range(self.wcs.wcs.naxis)]
def transform(self, pixel):
"""
        Transform pixel to world coordinates. You should pass in an Nx2 array
of (x, y) pixel coordinates to transform to world coordinates. This
will then return an NxM array where M is the number of dimensions in
the WCS
"""
if self.slice is None:
pixel_full = pixel.copy()
else:
pixel_full = []
for index in self.slice:
if index == 'x':
pixel_full.append(pixel[:, 0])
elif index == 'y':
pixel_full.append(pixel[:, 1])
else:
pixel_full.append(index)
pixel_full = np.array(np.broadcast_arrays(*pixel_full)).transpose()
pixel_full += 1
if pixel_full.shape[0] == 0:
world = np.zeros((0, 2))
else:
world = self.wcs.wcs_pix2world(pixel_full, 1)
        # At the moment we have to check manually that the transformation
        # round-trips; any point that fails to round-trip is flagged as invalid.
pixel_check = self.wcs.wcs_world2pix(world, 1)
with np.errstate(invalid='ignore'):
invalid = np.any(np.abs(pixel_check - pixel_full) > 1., axis=1)
world[invalid] = np.nan
return world
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return WCSWorld2PixelTransform(self.wcs, slice=self.slice)
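# Illustrative usage sketch (comments only; assumes a 2-dimensional WCS):
#
#     from astropy.wcs import WCS
#     wcs = WCS(naxis=2)
#     trans = WCSPixel2WorldTransform(wcs)
#     world = trans.transform(np.array([[0., 0.], [10., 10.]]))   # Nx2 -> NxM
#
# and trans.inverted() recovers the matching WCSWorld2PixelTransform.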
class CoordinateTransform(CurvedTransform):
has_inverse = True
def __init__(self, input_system, output_system):
super().__init__()
self._input_system_name = input_system
self._output_system_name = output_system
if isinstance(self._input_system_name, WCS):
self.input_system = wcs_to_celestial_frame(self._input_system_name)
elif isinstance(self._input_system_name, str):
self.input_system = frame_transform_graph.lookup_name(self._input_system_name)
if self.input_system is None:
raise ValueError("Frame {0} not found".format(self._input_system_name))
elif isinstance(self._input_system_name, BaseCoordinateFrame):
self.input_system = self._input_system_name
else:
raise TypeError("input_system should be a WCS instance, string, or a coordinate frame instance")
if isinstance(self._output_system_name, WCS):
self.output_system = wcs_to_celestial_frame(self._output_system_name)
elif isinstance(self._output_system_name, str):
self.output_system = frame_transform_graph.lookup_name(self._output_system_name)
if self.output_system is None:
raise ValueError("Frame {0} not found".format(self._output_system_name))
elif isinstance(self._output_system_name, BaseCoordinateFrame):
self.output_system = self._output_system_name
else:
raise TypeError("output_system should be a WCS instance, string, or a coordinate frame instance")
        self.same_frames = self.input_system == self.output_system
@property
def same_frames(self):
return self._same_frames
@same_frames.setter
def same_frames(self, same_frames):
self._same_frames = same_frames
def transform(self, input_coords):
"""
Transform one set of coordinates to another
"""
if self.same_frames:
return input_coords
input_coords = input_coords*u.deg
x_in, y_in = input_coords[:, 0], input_coords[:, 1]
c_in = SkyCoord(UnitSphericalRepresentation(x_in, y_in),
frame=self.input_system)
# We often need to transform arrays that contain NaN values, and filtering
# out the NaN values would have a performance hit, so instead we just pass
# on all values and just ignore Numpy warnings
with np.errstate(all='ignore'):
c_out = c_in.transform_to(self.output_system)
if issubclass(c_out.representation, (SphericalRepresentation, UnitSphericalRepresentation)):
lon = c_out.data.lon.deg
lat = c_out.data.lat.deg
else:
lon = c_out.spherical.lon.deg
lat = c_out.spherical.lat.deg
return np.concatenate((lon[:, np.newaxis], lat[:, np.newaxis]), axis=1)
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return CoordinateTransform(self._output_system_name, self._input_system_name)
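# Illustrative usage sketch (comments only): transform FK5 coordinates, given
# in degrees as an Nx2 array, to Galactic coordinates:
#
#     trans = CoordinateTransform('fk5', 'galactic')
#     out = trans.transform(np.array([[10.68, 41.27]]))   # degrees in and out
#
# 'fk5' and 'galactic' are frame names registered in astropy's
# frame_transform_graph.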
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file defines the classes used to represent a 'coordinate', which includes
axes, ticks, tick labels, and grid lines.
"""
import numpy as np
from matplotlib.ticker import Formatter
from matplotlib.transforms import Affine2D, ScaledTranslation
from matplotlib.patches import PathPatch
from matplotlib import rcParams
from ... import units as u
from .formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
from .ticks import Ticks
from .ticklabels import TickLabels
from .axislabels import AxisLabels
from .grid_paths import get_lon_lat_path, get_gridline_path
__all__ = ['CoordinateHelper']
def wrap_angle_at(values, coord_wrap):
# On ARM processors, np.mod emits warnings if there are NaN values in the
# array, although this doesn't seem to happen on other processors.
with np.errstate(invalid='ignore'):
return np.mod(values - coord_wrap, 360.) - (360. - coord_wrap)
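# Illustrative examples (comments only):
#
#     wrap_angle_at(np.array([370.]), 360.)   ->   array([ 10.])
#     wrap_angle_at(np.array([270.]), 180.)   ->   array([-90.])
#
# i.e. with coord_wrap=w the result lies in the half-open interval [w-360, w).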
class CoordinateHelper:
"""
Helper class to control one of the coordinates in the
:class:`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
parent_axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate helper belongs to.
parent_map : :class:`~astropy.visualization.wcsaxes.CoordinatesMap`
The :class:`~astropy.visualization.wcsaxes.CoordinatesMap` object this
coordinate belongs to.
transform : `~matplotlib.transforms.Transform`
The transform corresponding to this coordinate system.
coord_index : int
The index of this coordinate in the
:class:`~astropy.visualization.wcsaxes.CoordinatesMap`.
coord_type : {'longitude', 'latitude', 'scalar'}
The type of this coordinate, which is used to determine the wrapping and
boundary behavior of coordinates. Longitudes wrap at ``coord_wrap``,
latitudes have to be in the range -90 to 90, and scalars are unbounded
and do not wrap.
coord_unit : `~astropy.units.Unit`
The unit that this coordinate is in given the output of transform.
format_unit : `~astropy.units.Unit`, optional
The unit to use to display the coordinates.
coord_wrap : float
The angle at which the longitude wraps (defaults to 360)
frame : `~astropy.visualization.wcsaxes.frame.BaseFrame`
The frame of the :class:`~astropy.visualization.wcsaxes.WCSAxes`.
"""
def __init__(self, parent_axes=None, parent_map=None, transform=None,
coord_index=None, coord_type='scalar', coord_unit=None,
coord_wrap=None, frame=None, format_unit=None):
# Keep a reference to the parent axes and the transform
self.parent_axes = parent_axes
self.parent_map = parent_map
self.transform = transform
self.coord_index = coord_index
self.coord_unit = coord_unit
self.format_unit = format_unit
self.frame = frame
self.set_coord_type(coord_type, coord_wrap)
# Initialize ticks
self.dpi_transform = Affine2D()
self.offset_transform = ScaledTranslation(0, 0, self.dpi_transform)
self.ticks = Ticks(transform=parent_axes.transData + self.offset_transform)
# Initialize tick labels
self.ticklabels = TickLabels(self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure())
self.ticks.display_minor_ticks(False)
self.minor_frequency = 5
# Initialize axis labels
self.axislabels = AxisLabels(self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure())
# Initialize container for the grid lines
self.grid_lines = []
# Initialize grid style. Take defaults from matplotlib.rcParams.
# Based on matplotlib.axis.YTick._get_gridline.
#
# Matplotlib's gridlines use Line2D, but ours use PathPatch.
# Patches take a slightly different format of linestyle argument.
lines_to_patches_linestyle = {'-': 'solid',
'--': 'dashed',
'-.': 'dashdot',
':': 'dotted',
'none': 'none',
'None': 'none',
' ': 'none',
'': 'none'}
self.grid_lines_kwargs = {'visible': False,
'facecolor': 'none',
'edgecolor': rcParams['grid.color'],
'linestyle': lines_to_patches_linestyle[rcParams['grid.linestyle']],
'linewidth': rcParams['grid.linewidth'],
'alpha': rcParams.get('grid.alpha', 1.0),
'transform': self.parent_axes.transData}
def grid(self, draw_grid=True, grid_type='lines', **kwargs):
"""
Plot grid lines for this coordinate.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : {'lines', 'contours'}
            Whether to draw the grid by computing the grid lines in
            world coordinates and plotting them directly (``'lines'``),
            or by sampling the world coordinates at many positions in
            the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended.
"""
if grid_type in ('lines', 'contours'):
self._grid_type = grid_type
else:
raise ValueError("grid_type should be 'lines' or 'contours'")
if 'color' in kwargs:
kwargs['edgecolor'] = kwargs.pop('color')
self.grid_lines_kwargs.update(kwargs)
        self.grid_lines_kwargs['visible'] = draw_grid
def set_coord_type(self, coord_type, coord_wrap=None):
"""
Set the coordinate type for the axis.
Parameters
----------
coord_type : str
One of 'longitude', 'latitude' or 'scalar'
coord_wrap : float, optional
The value to wrap at for angular coordinates
"""
self.coord_type = coord_type
if coord_type == 'longitude' and coord_wrap is None:
self.coord_wrap = 360
elif coord_type != 'longitude' and coord_wrap is not None:
raise NotImplementedError('coord_wrap is not yet supported '
'for non-longitude coordinates')
else:
self.coord_wrap = coord_wrap
# Initialize tick formatter/locator
if coord_type == 'scalar':
self._coord_scale_to_deg = None
self._formatter_locator = ScalarFormatterLocator(unit=self.coord_unit)
elif coord_type in ['longitude', 'latitude']:
if self.coord_unit is u.deg:
self._coord_scale_to_deg = None
else:
self._coord_scale_to_deg = self.coord_unit.to(u.deg)
self._formatter_locator = AngleFormatterLocator(unit=self.coord_unit,
format_unit=self.format_unit)
else:
raise ValueError("coord_type should be one of 'scalar', 'longitude', or 'latitude'")
def set_major_formatter(self, formatter):
"""
Set the formatter to use for the major tick labels.
Parameters
----------
formatter : str or Formatter
The format or formatter to use.
"""
if isinstance(formatter, Formatter):
raise NotImplementedError() # figure out how to swap out formatter
elif isinstance(formatter, str):
self._formatter_locator.format = formatter
else:
raise TypeError("formatter should be a string or a Formatter "
"instance")
def format_coord(self, value, format='auto'):
"""
Given the value of a coordinate, will format it according to the
format of the formatter_locator.
Parameters
----------
value : float
The value to format
format : {'auto', 'ascii', 'latex'}, optional
The format to use - by default the formatting will be adjusted
depending on whether Matplotlib is using LaTeX or MathTex. To
get plain ASCII strings, use format='ascii'.
"""
if not hasattr(self, "_fl_spacing"):
return "" # _update_ticks has not been called yet
fl = self._formatter_locator
if isinstance(fl, AngleFormatterLocator):
# Convert to degrees if needed
if self._coord_scale_to_deg is not None:
value *= self._coord_scale_to_deg
if self.coord_type == 'longitude':
value = wrap_angle_at(value, self.coord_wrap)
value = value * u.degree
value = value.to_value(fl._unit)
spacing = self._fl_spacing
string = fl.formatter(values=[value] * fl._unit, spacing=spacing, format=format)
return string[0]
def set_separator(self, separator):
"""
Set the separator to use for the angle major tick labels.
Parameters
----------
        separator : str or tuple
            The separator between numbers in sexagesimal representation.
"""
if not (self._formatter_locator.__class__ == AngleFormatterLocator):
raise TypeError("Separator can only be specified for angle coordinates")
if isinstance(separator, str) or isinstance(separator, tuple):
self._formatter_locator.sep = separator
else:
raise TypeError("separator should be a string or a tuple")
def set_format_unit(self, unit):
"""
Set the unit for the major tick labels.
Parameters
----------
        unit : :class:`~astropy.units.Unit`
            The unit to which the tick labels should be converted.
"""
self._formatter_locator.format_unit = u.Unit(unit)
def set_ticks(self, values=None, spacing=None, number=None, size=None,
width=None, color=None, alpha=None, exclude_overlapping=False):
"""
Set the location and properties of the ticks.
At most one of the options from ``values``, ``spacing``, or
``number`` can be specified.
Parameters
----------
values : iterable, optional
The coordinate values at which to show the ticks.
spacing : float, optional
The spacing between ticks.
number : float, optional
The approximate number of ticks shown.
size : float, optional
The length of the ticks in points
color : str or tuple
A valid Matplotlib color for the ticks
exclude_overlapping : bool, optional
Whether to exclude tick labels that overlap over each other.
"""
if sum([values is None, spacing is None, number is None]) < 2:
raise ValueError("At most one of values, spacing, or number should "
"be specified")
if values is not None:
self._formatter_locator.values = values
elif spacing is not None:
self._formatter_locator.spacing = spacing
elif number is not None:
self._formatter_locator.number = number
if size is not None:
self.ticks.set_ticksize(size)
if width is not None:
self.ticks.set_linewidth(width)
if color is not None:
self.ticks.set_color(color)
if alpha is not None:
self.ticks.set_alpha(alpha)
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
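    # Usage sketch (illustrative; ``ax`` is an assumed WCSAxes instance and
    # ``u`` is astropy.units). Only one of values/spacing/number may be given:
    #
    #     ax.coords[0].set_ticks(spacing=30 * u.arcmin, color='white')
    #     ax.coords[1].set_ticks(number=4, size=8, width=1.5)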
def set_ticks_position(self, position):
"""
Set where ticks should appear
Parameters
----------
position : str
The axes on which the ticks for this coordinate should appear.
            Should be a string containing zero or more of ``'b'``, ``'t'``,
            ``'l'``, ``'r'``. For example, ``'lb'`` will cause the ticks to be
            shown on the left and bottom axes.
"""
self.ticks.set_visible_axes(position)
def set_ticks_visible(self, visible):
"""
Set whether ticks are visible or not.
Parameters
----------
visible : bool
            The visibility of ticks. Setting to ``False`` will hide ticks
            along this coordinate.
"""
self.ticks.set_visible(visible)
def set_ticklabel(self, **kwargs):
"""
Set the visual properties for the tick labels.
Parameters
----------
kwargs
Keyword arguments are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
self.ticklabels.set(**kwargs)
def set_ticklabel_position(self, position):
"""
Set where tick labels should appear
Parameters
----------
position : str
The axes on which the tick labels for this coordinate should
            appear. Should be a string containing zero or more of ``'b'``,
            ``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will cause the
            tick labels to be shown on the left and bottom axes.
"""
self.ticklabels.set_visible_axes(position)
def set_ticklabel_visible(self, visible):
"""
Set whether the tick labels are visible or not.
Parameters
----------
        visible : bool
            The visibility of the tick labels. Setting to ``False`` will hide
            this coordinate's tick labels.
"""
self.ticklabels.set_visible(visible)
def set_axislabel(self, text, minpad=1, **kwargs):
"""
Set the text and optionally visual properties for the axis label.
Parameters
----------
text : str
The axis label text.
minpad : float, optional
The padding for the label in terms of axis label font size.
kwargs
Keywords are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
self.axislabels.set_text(text)
self.axislabels.set_minpad(minpad)
self.axislabels.set(**kwargs)
def get_axislabel(self):
"""
Get the text for the axis label
Returns
-------
label : str
The axis label
"""
return self.axislabels.get_text()
def set_axislabel_position(self, position):
"""
Set where axis labels should appear
Parameters
----------
position : str
The axes on which the axis label for this coordinate should
            appear. Should be a string containing zero or more of ``'b'``,
            ``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will cause the
            axis label to be shown on the left and bottom axes.
"""
self.axislabels.set_visible_axes(position)
def set_axislabel_visibility_rule(self, rule):
"""
Set the rule used to determine when the axis label is drawn.
Parameters
----------
rule : str
If the rule is 'always' axis labels will always be drawn on the
axis. If the rule is 'ticks' the label will only be drawn if ticks
were drawn on that axis. If the rule is 'labels' the axis label
will only be drawn if tick labels were drawn on that axis.
"""
self.axislabels.set_visibility_rule(rule)
    def get_axislabel_visibility_rule(self):
"""
Get the rule used to determine when the axis label is drawn.
"""
return self.axislabels.get_visibility_rule()
@property
def locator(self):
return self._formatter_locator.locator
@property
def formatter(self):
return self._formatter_locator.formatter
def _draw_grid(self, renderer):
renderer.open_group('grid lines')
self._update_ticks()
if self.grid_lines_kwargs['visible']:
if self._grid_type == 'lines':
self._update_grid_lines()
else:
self._update_grid_contour()
if self._grid_type == 'lines':
frame_patch = self.frame.patch
for path in self.grid_lines:
p = PathPatch(path, **self.grid_lines_kwargs)
p.set_clip_path(frame_patch)
p.draw(renderer)
elif self._grid is not None:
for line in self._grid.collections:
line.set(**self.grid_lines_kwargs)
line.draw(renderer)
renderer.close_group('grid lines')
def _draw_ticks(self, renderer, bboxes, ticklabels_bbox, ticks_locs):
renderer.open_group('ticks')
self.ticks.draw(renderer, ticks_locs)
self.ticklabels.draw(renderer, bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox)
renderer.close_group('ticks')
def _draw_axislabels(self, renderer, bboxes, ticklabels_bbox, ticks_locs, visible_ticks):
renderer.open_group('axis labels')
self.axislabels.draw(renderer, bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
coord_ticklabels_bbox=ticklabels_bbox[self],
ticks_locs=ticks_locs,
visible_ticks=visible_ticks)
renderer.close_group('axis labels')
def _update_ticks(self):
# TODO: this method should be optimized for speed
# Here we determine the location and rotation of all the ticks. For
# each axis, we can check the intersections for the specific
# coordinate and once we have the tick positions, we can use the WCS
# to determine the rotations.
# Find the range of coordinates in all directions
coord_range = self.parent_map.get_coord_range()
# First find the ticks we want to show
tick_world_coordinates, self._fl_spacing = self.locator(*coord_range[self.coord_index])
if self.ticks.get_display_minor_ticks():
minor_ticks_w_coordinates = self._formatter_locator.minor_locator(self._fl_spacing, self.get_minor_frequency(), *coord_range[self.coord_index])
# We want to allow non-standard rectangular frames, so we just rely on
# the parent axes to tell us what the bounding frame is.
from . import conf
frame = self.frame.sample(conf.frame_boundary_samples)
self.ticks.clear()
self.ticklabels.clear()
self.lblinfo = []
self.lbl_world = []
# Look up parent axes' transform from data to figure coordinates.
#
# See:
# http://matplotlib.org/users/transforms_tutorial.html#the-transformation-pipeline
transData = self.parent_axes.transData
invertedTransLimits = transData.inverted()
for axis, spine in frame.items():
# Determine tick rotation in display coordinates and compare to
# the normal angle in display coordinates.
            pixel0 = spine.data
            world0 = self.transform.transform(pixel0)[:, self.coord_index]
axes0 = transData.transform(pixel0)
# Advance 2 pixels in figure coordinates
pixel1 = axes0.copy()
pixel1[:, 0] += 2.0
pixel1 = invertedTransLimits.transform(pixel1)
world1 = self.transform.transform(pixel1)[:, self.coord_index]
# Advance 2 pixels in figure coordinates
pixel2 = axes0.copy()
pixel2[:, 1] += 2.0 if self.frame.origin == 'lower' else -2.0
pixel2 = invertedTransLimits.transform(pixel2)
world2 = self.transform.transform(pixel2)[:, self.coord_index]
dx = (world1 - world0)
dy = (world2 - world0)
# Rotate by 90 degrees
dx, dy = -dy, dx
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
dx *= self._coord_scale_to_deg
dy *= self._coord_scale_to_deg
# Here we wrap at 180 not self.coord_wrap since we want to
# always ensure abs(dx) < 180 and abs(dy) < 180
dx = wrap_angle_at(dx, 180.)
dy = wrap_angle_at(dy, 180.)
tick_angle = np.degrees(np.arctan2(dy, dx))
normal_angle_full = np.hstack([spine.normal_angle, spine.normal_angle[-1]])
with np.errstate(invalid='ignore'):
reset = (((normal_angle_full - tick_angle) % 360 > 90.) &
((tick_angle - normal_angle_full) % 360 > 90.))
tick_angle[reset] -= 180.
# We find for each interval the starting and ending coordinate,
# ensuring that we take wrapping into account correctly for
# longitudes.
w1 = spine.world[:-1, self.coord_index]
w2 = spine.world[1:, self.coord_index]
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
w1 = w1 * self._coord_scale_to_deg
w2 = w2 * self._coord_scale_to_deg
w1 = wrap_angle_at(w1, self.coord_wrap)
w2 = wrap_angle_at(w2, self.coord_wrap)
with np.errstate(invalid='ignore'):
w1[w2 - w1 > 180.] += 360
w2[w1 - w2 > 180.] += 360
if self._coord_scale_to_deg is not None:
w1 = w1 / self._coord_scale_to_deg
w2 = w2 / self._coord_scale_to_deg
# For longitudes, we need to check ticks as well as ticks + 360,
# since the above can produce pairs such as 359 to 361 or 0.5 to
# 1.5, both of which would match a tick at 0.75. Otherwise we just
# check the ticks determined above.
self._compute_ticks(tick_world_coordinates, spine, axis, w1, w2, tick_angle)
if self.ticks.get_display_minor_ticks():
self._compute_ticks(minor_ticks_w_coordinates, spine, axis, w1,
w2, tick_angle, ticks='minor')
# format tick labels, add to scene
text = self.formatter(self.lbl_world * tick_world_coordinates.unit, spacing=self._fl_spacing)
for kwargs, txt in zip(self.lblinfo, text):
self.ticklabels.add(text=txt, **kwargs)
def _compute_ticks(self, tick_world_coordinates, spine, axis, w1, w2,
tick_angle, ticks='major'):
if self.coord_type == 'longitude':
tick_world_coordinates_values = tick_world_coordinates.to_value(u.deg)
tick_world_coordinates_values = np.hstack([tick_world_coordinates_values,
tick_world_coordinates_values + 360])
tick_world_coordinates_values *= u.deg.to(self.coord_unit)
else:
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
for t in tick_world_coordinates_values:
# Find steps where a tick is present. We have to check
# separately for the case where the tick falls exactly on the
# frame points, otherwise we'll get two matches, one for w1 and
# one for w2.
with np.errstate(invalid='ignore'):
intersections = np.hstack([np.nonzero((t - w1) == 0)[0],
np.nonzero(((t - w1) * (t - w2)) < 0)[0]])
# But we also need to check for intersection with the last w2
if t - w2[-1] == 0:
intersections = np.append(intersections, len(w2) - 1)
# Loop over ticks, and find exact pixel coordinates by linear
# interpolation
for imin in intersections:
imax = imin + 1
if np.allclose(w1[imin], w2[imin], rtol=1.e-13, atol=1.e-13):
continue # tick is exactly aligned with frame
else:
frac = (t - w1[imin]) / (w2[imin] - w1[imin])
x_data_i = spine.data[imin, 0] + frac * (spine.data[imax, 0] - spine.data[imin, 0])
y_data_i = spine.data[imin, 1] + frac * (spine.data[imax, 1] - spine.data[imin, 1])
x_pix_i = spine.pixel[imin, 0] + frac * (spine.pixel[imax, 0] - spine.pixel[imin, 0])
y_pix_i = spine.pixel[imin, 1] + frac * (spine.pixel[imax, 1] - spine.pixel[imin, 1])
delta_angle = tick_angle[imax] - tick_angle[imin]
if delta_angle > 180.:
delta_angle -= 360.
elif delta_angle < -180.:
delta_angle += 360.
angle_i = tick_angle[imin] + frac * delta_angle
if self.coord_type == 'longitude':
if self._coord_scale_to_deg is not None:
t *= self._coord_scale_to_deg
world = wrap_angle_at(t, self.coord_wrap)
if self._coord_scale_to_deg is not None:
world /= self._coord_scale_to_deg
else:
world = t
if ticks == 'major':
self.ticks.add(axis=axis,
pixel=(x_data_i, y_data_i),
world=world,
angle=angle_i,
axis_displacement=imin + frac)
# store information to pass to ticklabels.add
# it's faster to format many ticklabels at once outside
# of the loop
self.lblinfo.append(dict(axis=axis,
pixel=(x_pix_i, y_pix_i),
world=world,
angle=spine.normal_angle[imin],
axis_displacement=imin + frac))
self.lbl_world.append(world)
else:
self.ticks.add_minor(minor_axis=axis,
minor_pixel=(x_data_i, y_data_i),
minor_world=world,
minor_angle=angle_i,
minor_axis_displacement=imin + frac)
def display_minor_ticks(self, display_minor_ticks):
"""
Display minor ticks for this coordinate.
Parameters
----------
display_minor_ticks : bool
Whether or not to display minor ticks.
"""
self.ticks.display_minor_ticks(display_minor_ticks)
def get_minor_frequency(self):
return self.minor_frequency
def set_minor_frequency(self, frequency):
"""
Set the frequency of minor ticks per major ticks.
Parameters
----------
frequency : int
The number of minor ticks per major ticks.
"""
self.minor_frequency = frequency
def _update_grid_lines(self):
# For 3-d WCS with a correlated third axis, the *proper* way of
# drawing a grid should be to find the world coordinates of all pixels
# and drawing contours. What we are doing here assumes that we can
# define the grid lines with just two of the coordinates (and
# therefore assumes that the other coordinates are fixed and set to
# the value in the slice). Here we basically assume that if the WCS
# had a third axis, it has been abstracted away in the transformation.
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
n_coord = len(tick_world_coordinates_values)
from . import conf
n_samples = conf.grid_samples
xy_world = np.zeros((n_samples * n_coord, 2))
self.grid_lines = []
for iw, w in enumerate(tick_world_coordinates_values):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
if self.coord_index == 0:
xy_world[subset, 0] = np.repeat(w, n_samples)
xy_world[subset, 1] = np.linspace(coord_range[1][0], coord_range[1][1], n_samples)
else:
xy_world[subset, 0] = np.linspace(coord_range[0][0], coord_range[0][1], n_samples)
xy_world[subset, 1] = np.repeat(w, n_samples)
# We now convert all the world coordinates to pixel coordinates in a
# single go rather than doing this in the gridline to path conversion
# to fully benefit from vectorized coordinate transformations.
# Transform line to pixel coordinates
pixel = self.transform.inverted().transform(xy_world)
# Create round-tripped values for checking
xy_world_round = self.transform.transform(pixel)
for iw in range(n_coord):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
self.grid_lines.append(self._get_gridline(xy_world[subset], pixel[subset], xy_world_round[subset]))
def _get_gridline(self, xy_world, pixel, xy_world_round):
if self.coord_type == 'scalar':
return get_gridline_path(xy_world, pixel)
else:
return get_lon_lat_path(xy_world, pixel, xy_world_round)
def _update_grid_contour(self):
if hasattr(self, '_grid'):
for line in self._grid.collections:
line.remove()
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x, y, field = self.transform.get_coord_slices(xmin, xmax, ymin, ymax, 200, 200)
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
field = field[self.coord_index]
        # tick_world_coordinates is a Quantity array and we only need its values
tick_world_coordinates_values = tick_world_coordinates.value
if self.coord_type == 'longitude':
# Find biggest gap in tick_world_coordinates and wrap in middle
# For now just assume spacing is equal, so any mid-point will do
mid = 0.5 * (tick_world_coordinates_values[0] + tick_world_coordinates_values[1])
field = wrap_angle_at(field, mid)
tick_world_coordinates_values = wrap_angle_at(tick_world_coordinates_values, mid)
# Replace wraps by NaN
reset = (np.abs(np.diff(field[:, :-1], axis=0)) > 180) | (np.abs(np.diff(field[:-1, :], axis=1)) > 180)
field[:-1, :-1][reset] = np.nan
field[1:, :-1][reset] = np.nan
field[:-1, 1:][reset] = np.nan
field[1:, 1:][reset] = np.nan
if len(tick_world_coordinates_values) > 0:
self._grid = self.parent_axes.contour(x, y, field.transpose(), levels=np.sort(tick_world_coordinates_values))
else:
self._grid = None
|
9a84221b6cf3b674aaad062b1f94a94de3fe5b3edef741e01c209765ce260549 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file defines the AngleFormatterLocator class which is a class that
# provides both a method for a formatter and one for a locator, for a given
# label spacing. The advantage of keeping the two connected is that we need to
# make sure that the formatter can correctly represent the spacing requested and
# vice versa. For example, a format of dd:mm cannot work with a tick spacing
# that is not a multiple of one arcminute.
import re
import warnings
import numpy as np
from matplotlib import rcParams
from ... import units as u
from ...units import UnitsError
from ...coordinates import Angle
DMS_RE = re.compile(r'^dd(:mm(:ss(\.(s)+)?)?)?$')
HMS_RE = re.compile(r'^hh(:mm(:ss(\.(s)+)?)?)?$')
DDEC_RE = re.compile(r'^d(\.(d)+)?$')
DMIN_RE = re.compile(r'^m(\.(m)+)?$')
DSEC_RE = re.compile(r'^s(\.(s)+)?$')
SCAL_RE = re.compile(r'^x(\.(x)+)?$')
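# Illustrative examples of the strings these patterns are meant to accept:
# DMS_RE matches 'dd', 'dd:mm', 'dd:mm:ss' and 'dd:mm:ss.ss'; HMS_RE matches
# the same shapes with 'hh'; DDEC_RE matches 'd', 'd.d', 'd.dd', ...; DMIN_RE
# and DSEC_RE do the same for arcminutes ('m') and arcseconds ('s'); SCAL_RE
# matches 'x', 'x.x', 'x.xx', ... for scalar coordinates.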
class BaseFormatterLocator:
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None):
if (values, number, spacing).count(None) < 2:
raise ValueError("At most one of values/number/spacing can be specifed")
if values is not None:
self.values = values
elif number is not None:
self.number = number
elif spacing is not None:
self.spacing = spacing
else:
self.number = 5
self.format = format
@property
def values(self):
return self._values
@values.setter
def values(self, values):
if not isinstance(values, u.Quantity) or (not values.ndim == 1):
raise TypeError("values should be an astropy.units.Quantity array")
if not values.unit.is_equivalent(self._unit):
raise UnitsError("value should be in units compatible with "
"coordinate units ({0}) but found {1}".format(self._unit, values.unit))
self._number = None
self._spacing = None
self._values = values
@property
def number(self):
return self._number
@number.setter
def number(self, number):
self._number = number
self._spacing = None
self._values = None
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
self._number = None
self._spacing = spacing
self._values = None
def minor_locator(self, spacing, frequency, value_min, value_max):
if self.values is not None:
return [] * self._unit
minor_spacing = spacing.value / frequency
values = self._locate_values(value_min, value_max, minor_spacing)
index = np.where((values % frequency) == 0)
index = index[0][0]
values = np.delete(values, np.s_[index::frequency])
return values * minor_spacing * self._unit
@property
def format_unit(self):
return self._format_unit
@format_unit.setter
def format_unit(self, unit):
self._format_unit = u.Unit(unit)
@staticmethod
def _locate_values(value_min, value_max, spacing):
imin = np.ceil(value_min / spacing)
imax = np.floor(value_max / spacing)
values = np.arange(imin, imax + 1, dtype=int)
return values
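    # Worked example (illustrative): for value_min=0.3, value_max=2.7 and
    # spacing=0.5, imin = ceil(0.6) = 1 and imax = floor(5.4) = 5, so this
    # returns [1, 2, 3, 4, 5]; callers multiply by the spacing to get ticks
    # at 0.5, 1.0, 1.5, 2.0 and 2.5.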
class AngleFormatterLocator(BaseFormatterLocator):
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None,
unit=None, decimal=None, format_unit=None):
if unit is None:
unit = u.degree
if format_unit is None:
format_unit = unit
if format_unit not in (u.degree, u.hourangle, u.hour):
if decimal is None:
decimal = True
elif decimal is False:
raise UnitsError("Units should be degrees or hours when using non-decimal (sexagesimal) mode")
elif decimal is None:
decimal = False
self._unit = unit
self._format_unit = format_unit or unit
self._decimal = decimal
self._sep = None
super().__init__(values=values, number=number, spacing=spacing,
format=format)
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and (not isinstance(spacing, u.Quantity) or
spacing.unit.physical_type != 'angle'):
raise TypeError("spacing should be an astropy.units.Quantity "
"instance with units of angle")
self._number = None
self._spacing = spacing
self._values = None
@property
def sep(self):
return self._sep
@sep.setter
def sep(self, separator):
self._sep = separator
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if DMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.degree
if '.' in value:
self._precision = len(value) - value.index('.') - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(':') + 1
elif HMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.hourangle
if '.' in value:
self._precision = len(value) - value.index('.') - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(':') + 1
elif DDEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.degree
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
elif DMIN_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcmin
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
elif DSEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcsec
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
else:
raise ValueError("Invalid format: {0}".format(value))
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.e-10:
warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
self.spacing = self.base_spacing * max(1, round(ratio))
@property
def base_spacing(self):
if self._decimal:
spacing = self._format_unit / (10. ** self._precision)
else:
if self._fields == 1:
spacing = 1. * u.degree
elif self._fields == 2:
spacing = 1. * u.arcmin
elif self._fields == 3:
if self._precision == 0:
spacing = 1. * u.arcsec
else:
spacing = u.arcsec / (10. ** self._precision)
if self._format_unit is u.hourangle:
spacing *= 15
return spacing
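    # Worked example (illustrative): format 'dd:mm:ss.s' gives fields=3 and
    # precision=1, so base_spacing is 0.1 arcsec; format 'hh:mm' gives
    # 1 arcmin * 15 = 15 arcmin, i.e. one minute of hour angle.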
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * u.arcsec
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced).
if value_min == value_max:
return [] * self._unit, 0 * self._unit
if self.spacing is not None:
# spacing was manually specified
spacing_value = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if self.format is not None and dv < self.base_spacing:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing_value = self.base_spacing.to_value(self._unit)
else:
# otherwise we clip to the nearest 'sensible' spacing
if self._decimal:
from .utils import select_step_scalar
spacing_value = select_step_scalar(dv.to_value(self._format_unit)) * self._format_unit.to(self._unit)
else:
if self._format_unit is u.degree:
from .utils import select_step_degree
spacing_value = select_step_degree(dv).to_value(self._unit)
else:
from .utils import select_step_hour
spacing_value = select_step_hour(dv).to_value(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this.
values = self._locate_values(value_min, value_max, spacing_value)
return values * spacing_value * self._unit, spacing_value * self._unit
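    # Usage sketch (illustrative):
    #
    #     fl = AngleFormatterLocator(number=5)
    #     ticks, spacing = fl.locator(0., 2.)   # limits in degrees
    #
    # ``ticks`` is a Quantity array of tick positions and ``spacing`` the
    # selected step, both in the locator's unit (degrees by default).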
def formatter(self, values, spacing, format='auto'):
if not isinstance(values, u.Quantity) and values is not None:
raise TypeError("values should be a Quantities array")
if len(values) > 0:
decimal = self._decimal
unit = self._format_unit
if self.format is None:
if decimal:
# Here we assume the spacing can be arbitrary, so for example
# 1.000223 degrees, in which case we don't want to have a
# format that rounds to degrees. So we find the number of
# decimal places we get from representing the spacing as a
# string in the desired units. The easiest way to find
# the smallest number of decimal places required is to
# format the number as a decimal float and strip any zeros
# from the end. We do this rather than just trusting e.g.
# str() because str(15.) == 15.0. We format using 10 decimal
# places by default before stripping the zeros since this
                    # corresponds to a resolution of less than a microarcsecond,
# which should be sufficient.
spacing = spacing.to_value(unit)
fields = 0
precision = len("{0:.10f}".format(spacing).replace('0', ' ').strip().split('.', 1)[1])
else:
spacing = spacing.to_value(unit / 3600)
if spacing >= 3600:
fields = 1
precision = 0
elif spacing >= 60:
fields = 2
precision = 0
elif spacing >= 1:
fields = 3
precision = 0
else:
fields = 3
precision = -int(np.floor(np.log10(spacing)))
else:
fields = self._fields
precision = self._precision
if decimal:
sep = None
fmt = None
elif self._sep is not None:
sep = self._sep
fmt = None
else:
sep = 'fromunit'
if unit == u.degree:
if format == 'latex' or (format == 'auto' and rcParams['text.usetex']):
fmt = 'latex'
else:
sep = ('\xb0', "'", '"')
fmt = None
else:
if format == 'ascii':
fmt = None
elif format == 'latex' or (format == 'auto' and rcParams['text.usetex']):
fmt = 'latex'
else:
# Here we still use LaTeX but this is for Matplotlib's
# LaTeX engine - we can't use fmt='latex' as this
# doesn't produce LaTeX output that respects the fonts.
sep = (r'$\mathregular{^h}$', r'$\mathregular{^m}$', r'$\mathregular{^s}$')
fmt = None
angles = Angle(values)
string = angles.to_string(unit=unit,
precision=precision,
decimal=decimal,
fields=fields,
sep=sep,
format=fmt).tolist()
return string
else:
return []
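    # Example sketch (illustrative): with the default degree unit,
    #
    #     fl = AngleFormatterLocator(format='dd:mm:ss')
    #     fl.formatter([266.4168] * u.deg, spacing=1 * u.arcsec, format='ascii')
    #
    # returns a degree-sign separated string such as 266°25'00", while
    # format='latex' produces LaTeX markup for the same value.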
class ScalarFormatterLocator(BaseFormatterLocator):
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None,
unit=None, format_unit=None):
if unit is not None:
self._unit = unit
self._format_unit = format_unit or unit
elif spacing is not None:
self._unit = spacing.unit
self._format_unit = format_unit or spacing.unit
elif values is not None:
self._unit = values.unit
self._format_unit = format_unit or values.unit
super().__init__(values=values, number=number, spacing=spacing,
format=format)
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and not isinstance(spacing, u.Quantity):
raise TypeError("spacing should be an astropy.units.Quantity instance")
self._number = None
self._spacing = spacing
self._values = None
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if SCAL_RE.match(value) is not None:
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.e-10:
warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
self.spacing = self.base_spacing * max(1, round(ratio))
elif not value.startswith('%'):
raise ValueError("Invalid format: {0}".format(value))
@property
def base_spacing(self):
return self._format_unit / (10. ** self._precision)
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * self._unit
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced).
if value_min == value_max:
return [] * self._unit, 0 * self._unit
if self.spacing is not None:
# spacing was manually specified
spacing = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if self.format is not None and (not self.format.startswith('%')) and dv < self.base_spacing:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing = self.base_spacing.to_value(self._unit)
else:
from .utils import select_step_scalar
spacing = select_step_scalar(dv.to_value(self._format_unit)) * self._format_unit.to(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this
values = self._locate_values(value_min, value_max, spacing)
return values * spacing * self._unit, spacing * self._unit
def formatter(self, values, spacing, format='auto'):
if len(values) > 0:
if self.format is None:
if spacing.value < 1.:
precision = -int(np.floor(np.log10(spacing.value)))
else:
precision = 0
elif self.format.startswith('%'):
return [(self.format % x.value) for x in values]
else:
precision = self._precision
return [("{0:." + str(precision) + "f}").format(x.to_value(self._format_unit)) for x in values]
else:
return []
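    # Usage sketch (illustrative): a scalar coordinate with a '%'-style format,
    #
    #     fl = ScalarFormatterLocator(unit=u.km, format='%.1f')
    #     fl.formatter([1.23, 2.5] * u.km, spacing=1 * u.km)
    #
    # returns ['1.2', '2.5']; an 'x.xx'-style format would instead round to
    # the stated number of decimal places in the format unit.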
|
199cfecf19aaeea59df6e636aafca466088e8d86d08b303c7191e492e8b0fcbc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from functools import partial
from collections import defaultdict
import numpy as np
from matplotlib.artist import Artist
from matplotlib.axes import Axes, subplot_class_factory
from matplotlib.transforms import Affine2D, Bbox, Transform
from ...coordinates import SkyCoord, BaseCoordinateFrame
from ...wcs import WCS
from ...wcs.utils import wcs_to_celestial_frame
from .transforms import (WCSPixel2WorldTransform, WCSWorld2PixelTransform,
CoordinateTransform)
from .coordinates_map import CoordinatesMap
from .utils import get_coord_meta
from .frame import EllipticalFrame, RectangularFrame
__all__ = ['WCSAxes', 'WCSAxesSubplot']
VISUAL_PROPERTIES = ['facecolor', 'edgecolor', 'linewidth', 'alpha', 'linestyle']
IDENTITY = WCS(naxis=2)
IDENTITY.wcs.ctype = ["X", "Y"]
IDENTITY.wcs.crval = [0., 0.]
IDENTITY.wcs.crpix = [1., 1.]
IDENTITY.wcs.cdelt = [1., 1.]
class _WCSAxesArtist(Artist):
"""This is a dummy artist to enforce the correct z-order of axis ticks,
tick labels, and gridlines.
FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder
and then renders them in sequence. For normal Matplotlib axes, the ticks,
tick labels, and gridlines are included in this list of artists and hence
are automatically drawn in the correct order. However, ``WCSAxes`` disables
the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders
ersatz ticks, labels, and gridlines by explicitly calling the functions
``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc.
This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels,
    and gridlines in the standard way."""
def draw(self, renderer, *args, **kwargs):
self.axes.draw_wcsaxes(renderer)
class WCSAxes(Axes):
"""
The main axes class that can be used to show world coordinates from a WCS.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure to add the axes to
rect : list
The position of the axes in the figure in relative units. Should be
given as ``[left, bottom, width, height]``.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances.
transData : `~matplotlib.transforms.Transform`, optional
Can be used to override the default data -> pixel mapping.
slices : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
"""
def __init__(self, fig, rect, wcs=None, transform=None, coord_meta=None,
transData=None, slices=None, frame_class=RectangularFrame,
**kwargs):
super().__init__(fig, rect, **kwargs)
self._bboxes = []
self.frame_class = frame_class
        if transData is not None:
            # User wants to override the transform for the final
            # data->pixel mapping
            self.transData = transData
self.reset_wcs(wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta)
self._hide_parent_artists()
self.format_coord = self._display_world_coords
self._display_coords_index = 0
fig.canvas.mpl_connect('key_press_event', self._set_cursor_prefs)
self.patch = self.coords.frame.patch
self._wcsaxesartist = _WCSAxesArtist()
self.add_artist(self._wcsaxesartist)
self._drawn = False
def _display_world_coords(self, x, y):
if not self._drawn:
return ""
if self._display_coords_index == -1:
return "%s %s (pixel)" % (x, y)
pixel = np.array([x, y])
coords = self._all_coords[self._display_coords_index]
world = coords._transform.transform(np.array([pixel]))[0]
xw = coords[self._x_index].format_coord(world[self._x_index], format='ascii')
yw = coords[self._y_index].format_coord(world[self._y_index], format='ascii')
if self._display_coords_index == 0:
system = "world"
else:
system = "world, overlay {0}".format(self._display_coords_index)
coord_string = "%s %s (%s)" % (xw, yw, system)
return coord_string
def _set_cursor_prefs(self, event, **kwargs):
if event.key == 'w':
self._display_coords_index += 1
if self._display_coords_index + 1 > len(self._all_coords):
self._display_coords_index = -1
def _hide_parent_artists(self):
# Turn off spines and current axes
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
self.yaxis.set_visible(False)
# We now overload ``imshow`` because we need to make sure that origin is
# set to ``lower`` for all images, which means that we need to flip RGB
# images.
def imshow(self, X, *args, **kwargs):
"""
Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`.
If an RGB image is passed as a PIL object, it will be flipped
vertically and ``origin`` will be set to ``lower``, since WCS
transformations - like FITS files - assume that the origin is the lower
left pixel of the image (whereas RGB images have the origin in the top
left).
All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`.
"""
origin = kwargs.get('origin', 'lower')
if origin == 'upper':
raise ValueError("Cannot use images with origin='upper' in WCSAxes.")
# To check whether the image is a PIL image we can check if the data
# has a 'getpixel' attribute - this is what Matplotlib's AxesImage does
try:
from PIL.Image import Image, FLIP_TOP_BOTTOM
except ImportError:
# We don't need to worry since PIL is not installed, so user cannot
# have passed RGB image.
pass
else:
if isinstance(X, Image) or hasattr(X, 'getpixel'):
X = X.transpose(FLIP_TOP_BOTTOM)
kwargs['origin'] = 'lower'
return super().imshow(X, *args, **kwargs)
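    # Usage sketch (illustrative; ``data`` is an assumed 2-d array):
    #
    #     ax.imshow(data, origin='lower', cmap='gray')
    #
    # origin='upper' is rejected above because WCS transformations assume
    # the origin is the lower-left pixel of the image.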
def plot_coord(self, *args, **kwargs):
"""
Plot `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.plot`. All other arguments are the same as
`matplotlib.axes.Axes.plot`. If not specified a ``transform`` keyword
argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to plot on the axes. This is converted to the
first two arguments to `matplotlib.axes.Axes.plot`.
See Also
--------
matplotlib.axes.Axes.plot : This method is called from this function with all arguments passed to it.
"""
if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)):
# Extract the frame from the first argument.
frame0 = args[0]
if isinstance(frame0, SkyCoord):
frame0 = frame0.frame
plot_data = []
for coord in self.coords:
if coord.coord_type == 'longitude':
plot_data.append(frame0.data.lon.to_value(coord.coord_unit))
elif coord.coord_type == 'latitude':
plot_data.append(frame0.data.lat.to_value(coord.coord_unit))
else:
raise NotImplementedError("Coordinates cannot be plotted with this "
"method because the WCS does not represent longitude/latitude.")
        if 'transform' in kwargs:
raise TypeError("The 'transform' keyword argument is not allowed,"
" as it is automatically determined by the input coordinate frame.")
transform = self.get_transform(frame0)
kwargs.update({'transform': transform})
args = tuple(plot_data) + args[1:]
super().plot(*args, **kwargs)
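    # Usage sketch (illustrative; assumes ``plt`` is matplotlib.pyplot and
    # ``wcs`` is an astropy.wcs.WCS with celestial axes):
    #
    #     ax = plt.subplot(projection=wcs)
    #     c = SkyCoord(266.4, -29.0, unit='deg', frame='icrs')
    #     ax.plot_coord(c, 'o', color='red')
    #
    # The ``transform`` keyword is constructed automatically from the
    # coordinate's frame.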
def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None):
"""
Reset the current Axes, to use a new WCS object.
"""
# Here determine all the coordinate axes that should be shown.
if wcs is None and transform is None:
self.wcs = IDENTITY
else:
# We now force call 'set', which ensures the WCS object is
# consistent, which will only be important if the WCS has been set
# by hand. For example if the user sets a celestial WCS by hand and
# forgets to set the units, WCS.wcs.set() will do this.
if wcs is not None:
wcs.wcs.set()
self.wcs = wcs
# If we are making a new WCS, we need to preserve the path object since
# it may already be used by objects that have been plotted, and we need
# to continue updating it. CoordinatesMap will create a new frame
# instance, but we can tell that instance to keep using the old path.
if hasattr(self, 'coords'):
previous_frame = {'path': self.coords.frame._path,
'color': self.coords.frame.get_color(),
'linewidth': self.coords.frame.get_linewidth()}
else:
previous_frame = {'path': None}
self.coords = CoordinatesMap(self, wcs=self.wcs, slice=slices,
transform=transform, coord_meta=coord_meta,
frame_class=self.frame_class,
previous_frame_path=previous_frame['path'])
if previous_frame['path'] is not None:
self.coords.frame.set_color(previous_frame['color'])
self.coords.frame.set_linewidth(previous_frame['linewidth'])
self._all_coords = [self.coords]
if slices is None:
self.slices = ('x', 'y')
self._x_index = 0
self._y_index = 1
else:
self.slices = slices
self._x_index = self.slices.index('x')
self._y_index = self.slices.index('y')
# Common default settings for Rectangular Frame
if self.frame_class is RectangularFrame:
for coord_index in range(len(self.slices)):
if self.slices[coord_index] == 'x':
self.coords[coord_index].set_axislabel_position('b')
self.coords[coord_index].set_ticklabel_position('b')
elif self.slices[coord_index] == 'y':
self.coords[coord_index].set_axislabel_position('l')
self.coords[coord_index].set_ticklabel_position('l')
else:
self.coords[coord_index].set_axislabel_position('')
self.coords[coord_index].set_ticklabel_position('')
self.coords[coord_index].set_ticks_position('')
# Common default settings for Elliptical Frame
elif self.frame_class is EllipticalFrame:
for coord_index in range(len(self.slices)):
if self.slices[coord_index] == 'x':
self.coords[coord_index].set_axislabel_position('h')
self.coords[coord_index].set_ticklabel_position('h')
self.coords[coord_index].set_ticks_position('h')
elif self.slices[coord_index] == 'y':
self.coords[coord_index].set_ticks_position('c')
self.coords[coord_index].set_axislabel_position('c')
self.coords[coord_index].set_ticklabel_position('c')
else:
self.coords[coord_index].set_axislabel_position('')
self.coords[coord_index].set_ticklabel_position('')
self.coords[coord_index].set_ticks_position('')
def draw_wcsaxes(self, renderer):
# Here need to find out range of all coordinates, and update range for
# each coordinate axis. For now, just assume it covers the whole sky.
self._bboxes = []
# This generates a structure like [coords][axis] = [...]
ticklabels_bbox = defaultdict(partial(defaultdict, list))
ticks_locs = defaultdict(partial(defaultdict, list))
visible_ticks = []
for coords in self._all_coords:
coords.frame.update()
for coord in coords:
coord._draw_grid(renderer)
for coords in self._all_coords:
for coord in coords:
coord._draw_ticks(renderer, bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox[coord],
ticks_locs=ticks_locs[coord])
visible_ticks.extend(coord.ticklabels.get_visible_axes())
for coords in self._all_coords:
for coord in coords:
coord._draw_axislabels(renderer, bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox,
ticks_locs=ticks_locs[coord],
visible_ticks=visible_ticks)
self.coords.frame.draw(renderer)
def draw(self, renderer, inframe=False):
# In Axes.draw, the following code can result in the xlim and ylim
# values changing, so we need to force call this here to make sure that
# the limits are correct before we update the patch.
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
if self._axisbelow is True:
self._wcsaxesartist.set_zorder(0.5)
elif self._axisbelow is False:
self._wcsaxesartist.set_zorder(2.5)
else:
# 'line': above patches, below lines
self._wcsaxesartist.set_zorder(1.5)
# We need to make sure that that frame path is up to date
self.coords.frame._update_patch_path()
super().draw(renderer, inframe=inframe)
self._drawn = True
def set_xlabel(self, label, labelpad=1, **kwargs):
self.coords[self._x_index].set_axislabel(label, minpad=labelpad, **kwargs)
def set_ylabel(self, label, labelpad=1, **kwargs):
self.coords[self._y_index].set_axislabel(label, minpad=labelpad, **kwargs)
def get_xlabel(self):
return self.coords[self._x_index].get_axislabel()
def get_ylabel(self):
return self.coords[self._y_index].get_axislabel()
def get_coords_overlay(self, frame, coord_meta=None):
# Here we can't use get_transform because that deals with
# pixel-to-pixel transformations when passing a WCS object.
if isinstance(frame, WCS):
coords = CoordinatesMap(self, frame, frame_class=self.frame_class)
else:
if coord_meta is None:
coord_meta = get_coord_meta(frame)
transform = self._get_transform_no_transdata(frame)
coords = CoordinatesMap(self, transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class)
self._all_coords.append(coords)
# Common settings for overlay
coords[0].set_axislabel_position('t')
coords[1].set_axislabel_position('r')
coords[0].set_ticklabel_position('t')
coords[1].set_ticklabel_position('r')
self.overlay_coords = coords
return coords
def get_transform(self, frame):
"""
Return a transform from the specified frame to display coordinates.
This does not include the transData transformation
Parameters
----------
frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str
The ``frame`` parameter can have several possible types:
* :class:`~astropy.wcs.WCS` instance: assumed to be a
transformation from pixel to world coordinates, where the
world coordinates are the same as those in the WCS
transformation used for this ``WCSAxes`` instance. This is
used for example to show contours, since this involves
plotting an array in pixel coordinates that are not the
final data coordinate and have to be transformed to the
common world coordinate system first.
* :class:`~matplotlib.transforms.Transform` instance: it is
assumed to be a transform to the world coordinates that are
part of the WCS used to instantiate this ``WCSAxes``
instance.
* ``'pixel'`` or ``'world'``: return a transformation that
allows users to plot in pixel/data coordinates (essentially
an identity transform) and ``world`` (the default
world-to-pixel transformation used to instantiate the
``WCSAxes`` instance).
* ``'fk5'`` or ``'galactic'``: return a transformation from
the specified frame to the pixel/data coordinates.
* :class:`~astropy.coordinates.BaseCoordinateFrame` instance.
"""
return self._get_transform_no_transdata(frame).inverted() + self.transData
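    # Usage sketch (illustrative): overplotting in another celestial frame,
    # assuming ``ax`` is a WCSAxes with a celestial WCS:
    #
    #     ax.plot([0.], [0.], 'o', transform=ax.get_transform('galactic'))
    #
    # places a marker at Galactic (l, b) = (0, 0) whatever the frame of the
    # underlying WCS.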
def _get_transform_no_transdata(self, frame):
"""
Return a transform from data to the specified frame
"""
if self.wcs is None and frame != 'pixel':
raise ValueError('No WCS specified, so only pixel coordinates are available')
if isinstance(frame, WCS):
coord_in = wcs_to_celestial_frame(self.wcs)
coord_out = wcs_to_celestial_frame(frame)
if coord_in == coord_out:
return (WCSPixel2WorldTransform(self.wcs, slice=self.slices) +
WCSWorld2PixelTransform(frame))
else:
return (WCSPixel2WorldTransform(self.wcs, slice=self.slices) +
CoordinateTransform(self.wcs, frame) +
WCSWorld2PixelTransform(frame))
elif frame == 'pixel':
return Affine2D()
elif isinstance(frame, Transform):
pixel2world = WCSPixel2WorldTransform(self.wcs, slice=self.slices)
return pixel2world + frame
else:
pixel2world = WCSPixel2WorldTransform(self.wcs, slice=self.slices)
if frame == 'world':
return pixel2world
else:
coordinate_transform = CoordinateTransform(self.wcs, frame)
if coordinate_transform.same_frames:
return pixel2world
else:
return pixel2world + CoordinateTransform(self.wcs, frame)
def get_tightbbox(self, renderer):
if not self.get_visible():
return
bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)]
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return self.get_window_extent(renderer)
def grid(self, b=None, axis='both', *, which='major', **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments. This behaves like `matplotlib.axes.Axes`
except that if no arguments are specified, the grid is shown rather
than toggled.
Parameters
----------
b : bool
Whether to show the gridlines.
"""
if not hasattr(self, 'coords'):
return
if which != 'major':
raise NotImplementedError('Plotting the grid for the minor ticks is '
'not supported.')
if axis == 'both':
self.coords.grid(draw_grid=b, **kwargs)
elif axis == 'x':
self.coords[0].grid(draw_grid=b, **kwargs)
elif axis == 'y':
self.coords[1].grid(draw_grid=b, **kwargs)
else:
raise ValueError('axis should be one of x/y/both')
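    # Usage sketch (illustrative):
    #
    #     ax.grid(color='white', ls='dotted')            # both coordinates
    #     ax.grid(axis='x', color='yellow', alpha=0.5)   # first coordinate only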
# In the following, we put the generated subplot class in a temporary class and
# we then inherit it - if we don't do this, the generated class appears to
# belong in matplotlib, not in WCSAxes, from the API's point of view.
class WCSAxesSubplot(subplot_class_factory(WCSAxes)):
"""
    A subplot class for WCSAxes
"""
pass
|
1a90cdc6fc24c2598f24e9dbdcdefb9e6ab1c49f5f653c6f0de670ac05bf2283 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.text import Text
from .frame import RectangularFrame
def sort_using(X, Y):
return [x for (y, x) in sorted(zip(Y, X))]
class TickLabels(Text):
def __init__(self, frame, *args, **kwargs):
self.clear()
self._frame = frame
super().__init__(*args, **kwargs)
self.set_clip_on(True)
self.set_visible_axes('all')
self.pad = 0.3
self._exclude_overlapping = False
def clear(self):
self.world = {}
self.pixel = {}
self.angle = {}
self.text = {}
self.disp = {}
def add(self, axis, world, pixel, angle, text, axis_displacement):
if axis not in self.world:
self.world[axis] = [world]
self.pixel[axis] = [pixel]
self.angle[axis] = [angle]
self.text[axis] = [text]
self.disp[axis] = [axis_displacement]
else:
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.text[axis].append(text)
self.disp[axis].append(axis_displacement)
def sort(self):
"""
Sort by axis displacement, which allows us to figure out which parts
of labels to not repeat.
"""
for axis in self.world:
self.world[axis] = sort_using(self.world[axis], self.disp[axis])
self.pixel[axis] = sort_using(self.pixel[axis], self.disp[axis])
self.angle[axis] = sort_using(self.angle[axis], self.disp[axis])
self.text[axis] = sort_using(self.text[axis], self.disp[axis])
self.disp[axis] = sort_using(self.disp[axis], self.disp[axis])
def simplify_labels(self):
"""
Figure out which parts of labels can be dropped to avoid repetition.
"""
self.sort()
for axis in self.world:
t1 = self.text[axis][0]
for i in range(1, len(self.world[axis])):
t2 = self.text[axis][i]
if len(t1) != len(t2):
t1 = self.text[axis][i]
continue
start = 0
# In the following loop, we need to ignore the last character,
# hence the len(t1) - 1. This is because if we have two strings
# like 13d14m15s we want to make sure that we keep the last
# part (15s) even if the two labels are identical.
for j in range(len(t1) - 1):
if t1[j] != t2[j]:
break
if t1[j] not in '-0123456789.':
start = j + 1
t1 = self.text[axis][i]
if start != 0:
starts_dollar = self.text[axis][i].startswith('$')
self.text[axis][i] = self.text[axis][i][start:]
if starts_dollar:
self.text[axis][i] = '$' + self.text[axis][i]
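    # Worked example (illustrative): for consecutive labels 13°14'15" and
    # 13°14'20", the common leading fields of the second label are dropped
    # and it is drawn as 20". The last field is always kept (hence the
    # ``len(t1) - 1`` above), so two identical neighbours still both show
    # their final field.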
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == 'all':
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def set_exclude_overlapping(self, exclude_overlapping):
self._exclude_overlapping = exclude_overlapping
def draw(self, renderer, bboxes, ticklabels_bbox):
if not self.get_visible():
return
self.simplify_labels()
text_size = renderer.points_to_pixels(self.get_size())
for axis in self.get_visible_axes():
for i in range(len(self.world[axis])):
# In the event that the label is empty (which is not expected
# but could happen in unforeseen corner cases), we should just
# skip to the next label.
if self.text[axis][i] == '':
continue
self.set_text(self.text[axis][i])
x, y = self.pixel[axis][i]
if isinstance(self._frame, RectangularFrame):
# This is just to preserve the current results, but can be
# removed next time the reference images are re-generated.
if np.abs(self.angle[axis][i]) < 45.:
ha = 'right'
va = 'bottom'
dx = - text_size * 0.5
dy = - text_size * 0.5
elif np.abs(self.angle[axis][i] - 90.) < 45:
ha = 'center'
va = 'bottom'
dx = 0
dy = - text_size * 1.5
elif np.abs(self.angle[axis][i] - 180.) < 45:
ha = 'left'
va = 'bottom'
dx = text_size * 0.5
dy = - text_size * 0.5
else:
ha = 'center'
va = 'bottom'
dx = 0
dy = text_size * 0.2
self.set_position((x + dx, y + dy))
self.set_ha(ha)
self.set_va(va)
else:
# This is the more general code for arbitrarily oriented
# axes
# Set initial position and find bounding box
self.set_position((x, y))
bb = super().get_window_extent(renderer)
# Find width and height, as well as angle at which we
# transition which side of the label we use to anchor the
# label.
width = bb.width
height = bb.height
# Project axis angle onto bounding box
ax = np.cos(np.radians(self.angle[axis][i]))
ay = np.sin(np.radians(self.angle[axis][i]))
# Set anchor point for label
if np.abs(self.angle[axis][i]) < 45.:
dx = width
dy = ay * height
elif np.abs(self.angle[axis][i] - 90.) < 45:
dx = ax * width
dy = height
elif np.abs(self.angle[axis][i] - 180.) < 45:
dx = -width
dy = ay * height
else:
dx = ax * width
dy = -height
dx *= 0.5
dy *= 0.5
# Find normalized vector along axis normal, so as to be
# able to nudge the label away by a constant padding factor
dist = np.hypot(dx, dy)
ddx = dx / dist
ddy = dy / dist
dx += ddx * text_size * self.pad
dy += ddy * text_size * self.pad
self.set_position((x - dx, y - dy))
self.set_ha('center')
self.set_va('center')
bb = super().get_window_extent(renderer)
# TODO: the problem here is that we might get rid of a label
# that has a key starting bit such as -0:30 where the -0
# might be dropped from all other labels.
if not self._exclude_overlapping or bb.count_overlaps(bboxes) == 0:
super().draw(renderer)
bboxes.append(bb)
ticklabels_bbox[axis].append(bb)
|
18a77b1493997ea9adb3d0d02fce1eeeb7300fd5686890fbf58d93a1688162bb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .coordinate_helpers import CoordinateHelper
from .transforms import WCSPixel2WorldTransform
from .utils import coord_type_from_ctype
from .frame import RectangularFrame
from .coordinate_range import find_coordinate_range
class CoordinatesMap:
"""
A container for coordinate helpers that represents a coordinate system.
This object can be used to access coordinate helpers by index (like a list)
or by name (like a dictionary).
Parameters
----------
axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate map belongs to.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances.
slice : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
previous_frame_path : `~matplotlib.path.Path`, optional
When changing the WCS of the axes, the frame instance will change but
we might want to keep re-using the same underlying matplotlib
`~matplotlib.path.Path` - in that case, this can be passed to this
keyword argument.
"""
def __init__(self, axes, wcs=None, transform=None, coord_meta=None,
slice=None, frame_class=RectangularFrame,
previous_frame_path=None):
# Keep track of parent axes and WCS
self._axes = axes
if wcs is None:
if transform is None:
raise ValueError("Either `wcs` or `transform` are required")
if coord_meta is None:
raise ValueError("`coord_meta` is required when "
"`transform` is passed")
self._transform = transform
naxis = 2
else:
if transform is not None:
raise ValueError("Cannot specify both `wcs` and `transform`")
if coord_meta is not None:
raise ValueError("Cannot pass `coord_meta` if passing `wcs`")
self._transform = WCSPixel2WorldTransform(wcs, slice=slice)
naxis = wcs.wcs.naxis
self.frame = frame_class(axes, self._transform, path=previous_frame_path)
# Set up coordinates
self._coords = []
self._aliases = {}
for coord_index in range(naxis):
# Extract coordinate metadata from WCS object or transform
if wcs is not None:
coord_unit = wcs.wcs.cunit[coord_index]
coord_type, format_unit, coord_wrap = coord_type_from_ctype(wcs.wcs.ctype[coord_index])
name = wcs.wcs.ctype[coord_index][:4].replace('-', '')
else:
try:
coord_type = coord_meta['type'][coord_index]
coord_wrap = coord_meta['wrap'][coord_index]
coord_unit = coord_meta['unit'][coord_index]
name = coord_meta['name'][coord_index]
format_unit = None
except IndexError:
raise ValueError("coord_meta items should have a length of {0}".format(len(wcs.wcs.naxis)))
self._coords.append(CoordinateHelper(parent_axes=axes,
parent_map=self,
transform=self._transform,
coord_index=coord_index,
coord_type=coord_type,
coord_wrap=coord_wrap,
coord_unit=coord_unit,
format_unit=format_unit,
frame=self.frame))
# Set up aliases for coordinates
self._aliases[name.lower()] = coord_index
def __getitem__(self, item):
if isinstance(item, str):
return self._coords[self._aliases[item.lower()]]
else:
return self._coords[item]
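    # Usage note (a hedged sketch): coordinates can be retrieved either by
    # integer index or by the lowercase alias built above from the WCS CTYPE
    # (or from ``coord_meta['name']``), e.g. ``ax.coords[0]`` or
    # ``ax.coords['glon']``.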
def set_visible(self, visibility):
raise NotImplementedError()
def __iter__(self):
for coord in self._coords:
yield coord
def grid(self, draw_grid=True, grid_type='lines', **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : { 'lines' | 'contours' }
Whether to plot the contours by determining the grid lines in
world coordinates and then plotting them in world coordinates
(``'lines'``) or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended.
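
        As a minimal usage sketch (assuming ``ax`` is a ``WCSAxes``
        instance)::

            ax.coords.grid(color='white', alpha=0.5, grid_type='contours')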
"""
for coord in self:
coord.grid(draw_grid=draw_grid, grid_type=grid_type, **kwargs)
def get_coord_range(self):
xmin, xmax = self._axes.get_xlim()
ymin, ymax = self._axes.get_ylim()
return find_coordinate_range(self._transform,
[xmin, xmax, ymin, ymax],
[coord.coord_type for coord in self],
[coord.coord_unit for coord in self])
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from ... import units as u
from ...coordinates import BaseCoordinateFrame
def select_step_degree(dv):
# Modified from axis_artist, supports astropy.units
if dv > 1. * u.arcsec:
degree_limits_ = [1.5, 3, 7, 13, 20, 40, 70, 120, 270, 520]
degree_steps_ = [1, 2, 5, 10, 15, 30, 45, 90, 180, 360]
degree_units = [u.degree] * len(degree_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 8, 11, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 5, 10, 15, 20, 30]
minute_limits_ = np.array(minsec_limits_) / 60.
minute_units = [u.arcmin] * len(minute_limits_)
second_limits_ = np.array(minsec_limits_) / 3600.
second_units = [u.arcsec] * len(second_limits_)
degree_limits = np.concatenate([second_limits_,
minute_limits_,
degree_limits_])
degree_steps = minsec_steps_ + minsec_steps_ + degree_steps_
degree_units = second_units + minute_units + degree_units
n = degree_limits.searchsorted(dv.to(u.degree))
step = degree_steps[n]
unit = degree_units[n]
return step * unit
else:
return select_step_scalar(dv.to_value(u.arcsec)) * u.arcsec
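# Worked example (consistent with the accompanying tests):
# select_step_degree(127 * u.deg) returns 180 * u.deg, and
# select_step_degree(33 * u.arcsec) returns 30 * u.arcsec.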
def select_step_hour(dv):
if dv > 15. * u.arcsec:
hour_limits_ = [1.5, 2.5, 3.5, 5, 7, 10, 15, 21, 36]
hour_steps_ = [1, 2, 3, 4, 6, 8, 12, 18, 24]
hour_units = [u.hourangle] * len(hour_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 4.5, 5.5, 8, 11, 14, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30]
minute_limits_ = np.array(minsec_limits_) / 60.
minute_units = [15. * u.arcmin] * len(minute_limits_)
second_limits_ = np.array(minsec_limits_) / 3600.
second_units = [15. * u.arcsec] * len(second_limits_)
hour_limits = np.concatenate([second_limits_,
minute_limits_,
hour_limits_])
hour_steps = minsec_steps_ + minsec_steps_ + hour_steps_
hour_units = second_units + minute_units + hour_units
n = hour_limits.searchsorted(dv.to(u.hourangle))
step = hour_steps[n]
unit = hour_units[n]
return step * unit
else:
return select_step_scalar(dv.to_value(15. * u.arcsec)) * (15. * u.arcsec)
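# Worked example (consistent with the accompanying tests):
# select_step_hour(127 * u.deg) returns 8 * u.hourangle, and
# select_step_hour(33 * u.arcsec) returns 30 * u.arcsec.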
def select_step_scalar(dv):
log10_dv = np.log10(dv)
base = np.floor(log10_dv)
frac = log10_dv - base
steps = np.log10([1, 2, 5, 10])
imin = np.argmin(np.abs(frac - steps))
return 10. ** (base + steps[imin])
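# Worked example (hedged): for dv = 0.66, log10(dv) ~= -0.18, so base = -1
# and frac ~= 0.82, which is closest to log10(5) ~= 0.70, giving
# 10 ** (-1 + log10(5)) = 0.5 (consistent with the accompanying tests).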
def get_coord_meta(frame):
coord_meta = {}
coord_meta['type'] = ('longitude', 'latitude')
coord_meta['wrap'] = (None, None)
coord_meta['unit'] = (u.deg, u.deg)
from astropy.coordinates import frame_transform_graph
if isinstance(frame, str):
initial_frame = frame
frame = frame_transform_graph.lookup_name(frame)
if frame is None:
raise ValueError("Unknown frame: {0}".format(initial_frame))
if not isinstance(frame, BaseCoordinateFrame):
frame = frame()
names = list(frame.representation_component_names.keys())
coord_meta['name'] = names[:2]
return coord_meta
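# Example usage (a hedged sketch): get_coord_meta('fk5') returns a dict with
# 'type' ('longitude', 'latitude'), 'wrap' (None, None), 'unit'
# (u.deg, u.deg), and 'name' ['ra', 'dec'], since the first two
# representation component names of the FK5 frame are 'ra' and 'dec'.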
def coord_type_from_ctype(ctype):
"""
Determine whether a particular WCS ctype corresponds to an angle or scalar
coordinate.
"""
if ctype[:4] in ['RA--']:
return 'longitude', u.hourangle, None
elif ctype[1:4] == 'LON':
return 'longitude', None, None
elif ctype[:4] in ['HPLN']:
return 'longitude', None, 180.
elif ctype[:4] in ['DEC-', 'HPLT'] or ctype[1:4] == 'LAT':
return 'latitude', None, None
else:
return 'scalar', None, None
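# Worked examples (consistent with the accompanying tests):
#     coord_type_from_ctype('RA--') -> ('longitude', u.hourangle, None)
#     coord_type_from_ctype('HPLN') -> ('longitude', None, 180.)
#     coord_type_from_ctype('spam') -> ('scalar', None, None)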
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from ... import units as u
# Algorithm inspired by PGSBOX from WCSLIB by M. Calabretta
LONLAT = {'longitude', 'latitude'}
def wrap_180(values):
values_new = values % 360.
with np.errstate(invalid='ignore'):
values_new[values_new > 180.] -= 360
return values_new
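# Worked example (hedged): wrap_180(np.array([350., 170., 190.])) gives
# [-10., 170., -170.]: values are first reduced modulo 360 and anything
# above 180 is shifted down by 360.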
def find_coordinate_range(transform, extent, coord_types, coord_units):
"""
Find the range of coordinates to use for ticks/grids
Parameters
----------
    transform : transform object
        Transform from pixel to world coordinates. Should have a ``transform``
        method that takes an ``(N, 2)`` array of pixel coordinates and returns
        an array of world coordinates with one column per coordinate.
extent : iterable
The range of the image viewport in pixel coordinates, given as [xmin,
xmax, ymin, ymax].
coord_types : list of str
Whether each coordinate is a ``'longitude'``, ``'latitude'``, or
``'scalar'`` value.
coord_units : list of `astropy.units.Unit`
        The units for each coordinate.
"""
# Sample coordinates on a NX x NY grid.
from . import conf
nx = ny = conf.coordinate_range_samples
x = np.linspace(extent[0], extent[1], nx + 1)
y = np.linspace(extent[2], extent[3], ny + 1)
xp, yp = np.meshgrid(x, y)
world = transform.transform(np.vstack([xp.ravel(), yp.ravel()]).transpose())
ranges = []
for coord_index, coord_type in enumerate(coord_types):
xw = world[:, coord_index].reshape(xp.shape)
if coord_type in LONLAT:
unit = coord_units[coord_index]
xw = xw * unit.to(u.deg)
# Iron out coordinates along first row
wjump = xw[0, 1:] - xw[0, :-1]
with np.errstate(invalid='ignore'):
reset = np.abs(wjump) > 180.
if np.any(reset):
wjump = wjump + np.sign(wjump) * 180.
wjump = 360. * (wjump / 360.).astype(int)
xw[0, 1:][reset] -= wjump[reset]
# Now iron out coordinates along all columns, starting with first row.
wjump = xw[1:] - xw[:1]
with np.errstate(invalid='ignore'):
reset = np.abs(wjump) > 180.
if np.any(reset):
wjump = wjump + np.sign(wjump) * 180.
wjump = 360. * (wjump / 360.).astype(int)
xw[1:][reset] -= wjump[reset]
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min = np.nanmin(xw)
xw_max = np.nanmax(xw)
# Check if range is smaller when normalizing to the range 0 to 360
if coord_type in LONLAT:
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min_check = np.nanmin(xw % 360.)
xw_max_check = np.nanmax(xw % 360.)
if xw_max_check - xw_min_check <= xw_max - xw_min < 360.:
xw_min = xw_min_check
xw_max = xw_max_check
# Check if range is smaller when normalizing to the range -180 to 180
if coord_type in LONLAT:
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min_check = np.nanmin(wrap_180(xw))
xw_max_check = np.nanmax(wrap_180(xw))
if xw_max_check - xw_min_check < 360. and xw_max - xw_min >= xw_max_check - xw_min_check:
xw_min = xw_min_check
xw_max = xw_max_check
x_range = xw_max - xw_min
if coord_type == 'longitude':
if x_range > 300.:
xw_min = 0.
xw_max = 360 - np.spacing(360.)
elif xw_min < 0.:
xw_min = max(-180., xw_min - 0.1 * x_range)
xw_max = min(+180., xw_max + 0.1 * x_range)
else:
xw_min = max(0., xw_min - 0.1 * x_range)
xw_max = min(360., xw_max + 0.1 * x_range)
elif coord_type == 'latitude':
xw_min = max(-90., xw_min - 0.1 * x_range)
xw_max = min(+90., xw_max + 0.1 * x_range)
if coord_type in LONLAT:
xw_min *= u.deg.to(unit)
xw_max *= u.deg.to(unit)
ranges.append((xw_min, xw_max))
return ranges
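# Example usage (a hedged sketch; ``transform`` is assumed to be any object
# with a ``transform`` method mapping an (N, 2) array of pixel coordinates
# to an array of world coordinates, as described in the docstring):
#
#     ranges = find_coordinate_range(transform, [0., 100., 0., 100.],
#                                    ['longitude', 'latitude'],
#                                    [u.deg, u.deg])
#
# This returns one (min, max) tuple per coordinate, expressed in the
# corresponding ``coord_units`` entry.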
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_almost_equal
from .... import units as u
from ..utils import (select_step_degree, select_step_hour, select_step_scalar,
coord_type_from_ctype)
from ....tests.helper import (assert_quantity_allclose as
assert_almost_equal_quantity)
def test_select_step_degree():
assert_almost_equal_quantity(select_step_degree(127 * u.deg), 180. * u.deg)
assert_almost_equal_quantity(select_step_degree(44 * u.deg), 45. * u.deg)
assert_almost_equal_quantity(select_step_degree(18 * u.arcmin), 15 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(3.4 * u.arcmin), 3 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(2 * u.arcmin), 2 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(59 * u.arcsec), 1 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(33 * u.arcsec), 30 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(2.2 * u.arcsec), 2 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.8 * u.arcsec), 1 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.2 * u.arcsec), 0.2 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.11 * u.arcsec), 0.1 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.022 * u.arcsec), 0.02 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.0043 * u.arcsec), 0.005 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.00083 * u.arcsec), 0.001 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.000027 * u.arcsec), 0.00002 * u.arcsec)
def test_select_step_hour():
assert_almost_equal_quantity(select_step_hour(127 * u.deg), 8. * u.hourangle)
assert_almost_equal_quantity(select_step_hour(44 * u.deg), 3. * u.hourangle)
assert_almost_equal_quantity(select_step_hour(18 * u.arcmin), 15 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(3.4 * u.arcmin), 3 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(2 * u.arcmin), 1.5 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(59 * u.arcsec), 1 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(33 * u.arcsec), 30 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(2.2 * u.arcsec), 3. * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.8 * u.arcsec), 0.75 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.2 * u.arcsec), 0.15 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.11 * u.arcsec), 0.15 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.022 * u.arcsec), 0.03 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.0043 * u.arcsec), 0.003 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.00083 * u.arcsec), 0.00075 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.000027 * u.arcsec), 0.00003 * u.arcsec)
def test_select_step_scalar():
assert_almost_equal(select_step_scalar(33122.), 50000.)
assert_almost_equal(select_step_scalar(433.), 500.)
assert_almost_equal(select_step_scalar(12.3), 10)
assert_almost_equal(select_step_scalar(3.3), 5.)
assert_almost_equal(select_step_scalar(0.66), 0.5)
assert_almost_equal(select_step_scalar(0.0877), 0.1)
assert_almost_equal(select_step_scalar(0.00577), 0.005)
assert_almost_equal(select_step_scalar(0.00022), 0.0002)
assert_almost_equal(select_step_scalar(0.000012), 0.00001)
assert_almost_equal(select_step_scalar(0.000000443), 0.0000005)
def test_coord_type_from_ctype():
assert coord_type_from_ctype(' LON') == ('longitude', None, None)
assert coord_type_from_ctype(' LAT') == ('latitude', None, None)
assert coord_type_from_ctype('HPLN') == ('longitude', None, 180.)
assert coord_type_from_ctype('HPLT') == ('latitude', None, None)
assert coord_type_from_ctype('RA--') == ('longitude', u.hourangle, None)
assert coord_type_from_ctype('DEC-') == ('latitude', None, None)
assert coord_type_from_ctype('spam') == ('scalar', None, None)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from matplotlib import rc_context
from .... import units as u
from ....tests.helper import assert_quantity_allclose
from ....units import UnitsError
from ..formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
class TestAngleFormatterLocator:
def test_no_options(self):
fl = AngleFormatterLocator()
assert fl.values is None
assert fl.number == 5
assert fl.spacing is None
def test_too_many_options(self):
with pytest.raises(ValueError) as exc:
AngleFormatterLocator(values=[1., 2.], number=5)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
AngleFormatterLocator(values=[1., 2.], spacing=5. * u.deg)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
AngleFormatterLocator(number=5, spacing=5. * u.deg)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
AngleFormatterLocator(values=[1., 2.], number=5, spacing=5. * u.deg)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
def test_values(self):
fl = AngleFormatterLocator(values=[0.1, 1., 14.] * u.degree)
assert fl.values.to_value(u.degree).tolist() == [0.1, 1., 14.]
assert fl.number is None
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.to_value(u.degree), [0.1, 1., 14.])
def test_number(self):
fl = AngleFormatterLocator(number=7)
assert fl.values is None
assert fl.number == 7
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.to_value(u.degree), [35., 40., 45., 50., 55.])
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [34.5, 34.75, 35., 35.25, 35.5, 35.75, 36.])
fl.format = 'dd'
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [35., 36.])
def test_spacing(self):
with pytest.raises(TypeError) as exc:
AngleFormatterLocator(spacing=3.)
assert exc.value.args[0] == "spacing should be an astropy.units.Quantity instance with units of angle"
fl = AngleFormatterLocator(spacing=3. * u.degree)
assert fl.values is None
assert fl.number is None
assert fl.spacing == 3. * u.degree
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.to_value(u.degree), [36., 39., 42., 45., 48., 51., 54.])
fl.spacing = 30. * u.arcmin
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [34.5, 35., 35.5, 36.])
fl.format = 'dd'
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [35., 36.])
def test_minor_locator(self):
fl = AngleFormatterLocator()
values, spacing = fl.locator(34.3, 55.4)
minor_values = fl.minor_locator(spacing, 5, 34.3, 55.4)
assert_almost_equal(minor_values.to_value(u.degree), [36., 37., 38.,
39., 41., 42., 43., 44., 46., 47., 48., 49., 51.,
52., 53., 54.])
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.to_value(u.degree), [37.5, 42.5, 47.5, 52.5])
fl.values = [0.1, 1., 14.] * u.degree
values, spacing = fl.locator(34.3, 36.1)
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.to_value(u.degree), [])
@pytest.mark.parametrize(('format', 'string'), [('dd', '15\xb0'),
('dd:mm', '15\xb024\''),
('dd:mm:ss', '15\xb023\'32"'),
('dd:mm:ss.s', '15\xb023\'32.0"'),
('dd:mm:ss.ssss', '15\xb023\'32.0316"'),
('hh', '1h'),
('hh:mm', '1h02m'),
('hh:mm:ss', '1h01m34s'),
('hh:mm:ss.s', '1h01m34.1s'),
('hh:mm:ss.ssss', '1h01m34.1354s'),
('d', '15'),
('d.d', '15.4'),
('d.dd', '15.39'),
('d.ddd', '15.392'),
('m', '924'),
('m.m', '923.5'),
('m.mm', '923.53'),
('s', '55412'),
('s.s', '55412.0'),
('s.ss', '55412.03'),
])
def test_format(self, format, string):
fl = AngleFormatterLocator(number=5, format=format)
assert fl.formatter([15.392231] * u.degree, None, format='ascii')[0] == string
@pytest.mark.parametrize(('separator', 'format', 'string'), [(('deg', "'", '"'), 'dd', '15deg'),
(('deg', "'", '"'), 'dd:mm', '15deg24\''),
(('deg', "'", '"'), 'dd:mm:ss', '15deg23\'32"'),
((':', "-", 's'), 'dd:mm:ss.s', '15:23-32.0s'),
(':', 'dd:mm:ss.s', '15:23:32.0'),
((':', ":", 's'), 'hh', '1:'),
(('-', "-", 's'), 'hh:mm:ss.ssss', '1-01-34.1354s'),
(('d', ":", '"'), 'd', '15'),
(('d', ":", '"'), 'd.d', '15.4'),
])
def test_separator(self, separator, format, string):
fl = AngleFormatterLocator(number=5, format=format)
fl.sep = separator
assert fl.formatter([15.392231] * u.degree, None)[0] == string
def test_latex_format(self):
fl = AngleFormatterLocator(number=5, format="dd:mm:ss")
assert fl.formatter([15.392231] * u.degree, None)[0] == '15\xb023\'32"'
with rc_context(rc={'text.usetex': True}):
assert fl.formatter([15.392231] * u.degree, None)[0] == "$15^\\circ23{}^\\prime32{}^{\\prime\\prime}$"
@pytest.mark.parametrize(('format'), ['x.xxx', 'dd.ss', 'dd:ss', 'mdd:mm:ss'])
def test_invalid_formats(self, format):
fl = AngleFormatterLocator(number=5)
with pytest.raises(ValueError) as exc:
fl.format = format
assert exc.value.args[0] == "Invalid format: " + format
@pytest.mark.parametrize(('format', 'base_spacing'), [('dd', 1. * u.deg),
('dd:mm', 1. * u.arcmin),
('dd:mm:ss', 1. * u.arcsec),
('dd:mm:ss.ss', 0.01 * u.arcsec),
('hh', 15. * u.deg),
('hh:mm', 15. * u.arcmin),
('hh:mm:ss', 15. * u.arcsec),
('hh:mm:ss.ss', 0.15 * u.arcsec),
('d', 1. * u.deg),
('d.d', 0.1 * u.deg),
('d.dd', 0.01 * u.deg),
('d.ddd', 0.001 * u.deg),
('m', 1. * u.arcmin),
('m.m', 0.1 * u.arcmin),
('m.mm', 0.01 * u.arcmin),
('s', 1. * u.arcsec),
('s.s', 0.1 * u.arcsec),
('s.ss', 0.01 * u.arcsec),
])
def test_base_spacing(self, format, base_spacing):
fl = AngleFormatterLocator(number=5, format=format)
assert fl.base_spacing == base_spacing
def test_incorrect_spacing(self):
fl = AngleFormatterLocator()
fl.spacing = 0.032 * u.deg
fl.format = 'dd:mm:ss'
assert_almost_equal(fl.spacing.to_value(u.arcsec), 115.)
def test_decimal_values(self):
# Regression test for a bug that meant that the spacing was not
# determined correctly for decimal coordinates
fl = AngleFormatterLocator()
fl.format = 'd.dddd'
assert_quantity_allclose(fl.locator(266.9730, 266.9750)[0],
[266.9735, 266.9740, 266.9745, 266.9750] * u.deg)
fl = AngleFormatterLocator(decimal=True, format_unit=u.hourangle, number=4)
assert_quantity_allclose(fl.locator(266.9730, 266.9750)[0],
[17.79825, 17.79830] * u.hourangle)
def test_values_unit(self):
# Make sure that the intrinsic unit and format unit are correctly
# taken into account when using the locator
fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.arcsec, decimal=True)
assert_quantity_allclose(fl.locator(850, 2150)[0],
[1000., 1200., 1400., 1600., 1800., 2000.] * u.arcsec)
fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.degree, decimal=False)
assert_quantity_allclose(fl.locator(850, 2150)[0],
[15., 20., 25., 30., 35.] * u.arcmin)
fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.hourangle, decimal=False)
assert_quantity_allclose(fl.locator(850, 2150)[0],
[60., 75., 90., 105., 120., 135.] * (15 * u.arcsec))
fl = AngleFormatterLocator(unit=u.arcsec)
fl.format = 'dd:mm:ss'
assert_quantity_allclose(fl.locator(0.9, 1.1)[0], [1] * u.arcsec)
fl = AngleFormatterLocator(unit=u.arcsec, spacing=0.2 * u.arcsec)
assert_quantity_allclose(fl.locator(0.3, 0.9)[0], [0.4, 0.6, 0.8] * u.arcsec)
@pytest.mark.parametrize(('spacing', 'string'), [(2 * u.deg, '15\xb0'),
(2 * u.arcmin, '15\xb024\''),
(2 * u.arcsec, '15\xb023\'32"'),
(0.1 * u.arcsec, '15\xb023\'32.0"')])
def test_formatter_no_format(self, spacing, string):
fl = AngleFormatterLocator()
assert fl.formatter([15.392231] * u.degree, spacing)[0] == string
@pytest.mark.parametrize(('format_unit', 'decimal', 'spacing', 'string'),
[(u.degree, False, 2 * u.degree, '15\xb0'),
(u.degree, False, 2 * u.arcmin, '15\xb024\''),
(u.degree, False, 2 * u.arcsec, '15\xb023\'32"'),
(u.degree, False, 0.1 * u.arcsec, '15\xb023\'32.0"'),
(u.hourangle, False, 15 * u.degree, '1h'),
(u.hourangle, False, 15 * u.arcmin, '1h02m'),
(u.hourangle, False, 15 * u.arcsec, '1h01m34s'),
(u.hourangle, False, 1.5 * u.arcsec, '1h01m34.1s'),
(u.degree, True, 15 * u.degree, '15'),
(u.degree, True, 0.12 * u.degree, '15.39'),
(u.degree, True, 0.0036 * u.arcsec, '15.392231'),
(u.arcmin, True, 15 * u.degree, '924'),
(u.arcmin, True, 0.12 * u.degree, '923.5'),
(u.arcmin, True, 0.1 * u.arcmin, '923.5'),
(u.arcmin, True, 0.0002 * u.arcmin, '923.5339'),
# Make sure that specifying None defaults to
# decimal for non-degree or non-hour angles
(u.arcsec, None, 0.01 * u.arcsec, '55412.03')])
def test_formatter_no_format_with_units(self, format_unit, decimal, spacing, string):
# Check the formatter works when specifying the default units and
# decimal behavior to use.
fl = AngleFormatterLocator(unit=u.degree, format_unit=format_unit, decimal=decimal)
assert fl.formatter([15.392231] * u.degree, spacing, format='ascii')[0] == string
def test_incompatible_unit_decimal(self):
with pytest.raises(UnitsError) as exc:
AngleFormatterLocator(unit=u.arcmin, decimal=False)
assert exc.value.args[0] == 'Units should be degrees or hours when using non-decimal (sexagesimal) mode'
class TestScalarFormatterLocator:
def test_no_options(self):
fl = ScalarFormatterLocator(unit=u.m)
assert fl.values is None
assert fl.number == 5
assert fl.spacing is None
def test_too_many_options(self):
with pytest.raises(ValueError) as exc:
ScalarFormatterLocator(values=[1., 2.] * u.m, number=5)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
ScalarFormatterLocator(values=[1., 2.] * u.m, spacing=5. * u.m)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
ScalarFormatterLocator(number=5, spacing=5. * u.m)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
with pytest.raises(ValueError) as exc:
ScalarFormatterLocator(values=[1., 2.] * u.m, number=5, spacing=5. * u.m)
assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
def test_values(self):
fl = ScalarFormatterLocator(values=[0.1, 1., 14.] * u.m, unit=u.m)
assert fl.values.value.tolist() == [0.1, 1., 14.]
assert fl.number is None
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, [0.1, 1., 14.])
def test_number(self):
fl = ScalarFormatterLocator(number=7, unit=u.m)
assert fl.values is None
assert fl.number == 7
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, np.linspace(36., 54., 10))
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, np.linspace(34.4, 36, 9))
fl.format = 'x'
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [35., 36.])
def test_spacing(self):
fl = ScalarFormatterLocator(spacing=3. * u.m)
assert fl.values is None
assert fl.number is None
assert fl.spacing == 3. * u.m
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, [36., 39., 42., 45., 48., 51., 54.])
fl.spacing = 0.5 * u.m
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [34.5, 35., 35.5, 36.])
fl.format = 'x'
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [35., 36.])
def test_minor_locator(self):
fl = ScalarFormatterLocator(unit=u.m)
values, spacing = fl.locator(34.3, 55.4)
minor_values = fl.minor_locator(spacing, 5, 34.3, 55.4)
assert_almost_equal(minor_values.value, [36., 37., 38., 39., 41., 42.,
43., 44., 46., 47., 48., 49., 51., 52., 53., 54.])
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.value, [37.5, 42.5, 47.5, 52.5])
fl.values = [0.1, 1., 14.] * u.m
values, spacing = fl.locator(34.3, 36.1)
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.value, [])
@pytest.mark.parametrize(('format', 'string'), [('x', '15'),
('x.x', '15.4'),
('x.xx', '15.39'),
('x.xxx', '15.392'),
('%g', '15.3922'),
('%f', '15.392231'),
('%.2f', '15.39'),
('%.3f', '15.392')])
def test_format(self, format, string):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
assert fl.formatter([15.392231] * u.m, None)[0] == string
@pytest.mark.parametrize(('format', 'string'), [('x', '1539'),
('x.x', '1539.2'),
('x.xx', '1539.22'),
('x.xxx', '1539.223')])
def test_format_unit(self, format, string):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
fl.format_unit = u.cm
assert fl.formatter([15.392231] * u.m, None)[0] == string
@pytest.mark.parametrize(('format'), ['dd', 'dd:mm', 'xx:mm', 'mx.xxx'])
def test_invalid_formats(self, format):
fl = ScalarFormatterLocator(number=5, unit=u.m)
with pytest.raises(ValueError) as exc:
fl.format = format
assert exc.value.args[0] == "Invalid format: " + format
@pytest.mark.parametrize(('format', 'base_spacing'), [('x', 1. * u.m),
('x.x', 0.1 * u.m),
('x.xxx', 0.001 * u.m)])
def test_base_spacing(self, format, base_spacing):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
assert fl.base_spacing == base_spacing
def test_incorrect_spacing(self):
fl = ScalarFormatterLocator(unit=u.m)
fl.spacing = 0.032 * u.m
fl.format = 'x.xx'
assert_almost_equal(fl.spacing.to_value(u.m), 0.03)
def test_values_unit(self):
# Make sure that the intrinsic unit and format unit are correctly
# taken into account when using the locator
fl = ScalarFormatterLocator(unit=u.cm, format_unit=u.m)
assert_quantity_allclose(fl.locator(850, 2150)[0],
[1000., 1200., 1400., 1600., 1800., 2000.] * u.cm)
fl = ScalarFormatterLocator(unit=u.cm, format_unit=u.m)
fl.format = 'x.x'
assert_quantity_allclose(fl.locator(1, 19)[0], [10] * u.cm)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from ..core import WCSAxes
import matplotlib.pyplot as plt
from matplotlib.backend_bases import KeyEvent
from ....wcs import WCS
from ....coordinates import FK5
from ....time import Time
from ....tests.image_tests import ignore_matplotlibrc
from .test_images import BaseImageTests
class TestDisplayWorldCoordinate(BaseImageTests):
@ignore_matplotlibrc
def test_overlay_coords(self, tmpdir):
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs)
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test1.png').strpath)
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == '0\xb029\'45" -0\xb029\'20" (world)'
# Test pixel coordinates
event1 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event1.key, guiEvent=event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
event3 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event3.key, guiEvent=event3)
# Test that it still displays world coords when there are no overlay coords
string_world2 = ax._display_world_coords(0.523412, 0.518311)
assert string_world2 == '0\xb029\'45" -0\xb029\'20" (world)'
overlay = ax.get_coords_overlay('fk5')
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test2.png').strpath)
event4 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event4.key, guiEvent=event4)
# Test that it displays the overlay world coordinates
string_world3 = ax._display_world_coords(0.523412, 0.518311)
assert string_world3 == '267.176 -28\xb045\'56" (world, overlay 1)'
overlay = ax.get_coords_overlay(FK5())
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test3.png').strpath)
event5 = KeyEvent('test_pixel_coords', canvas, 'w')
        fig.canvas.key_press_event(event5.key, guiEvent=event5)
# Test that it displays the overlay world coordinates
string_world4 = ax._display_world_coords(0.523412, 0.518311)
assert string_world4 == '267.176 -28\xb045\'56" (world, overlay 2)'
overlay = ax.get_coords_overlay(FK5(equinox=Time("J2030")))
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test4.png').strpath)
event6 = KeyEvent('test_pixel_coords', canvas, 'w')
        fig.canvas.key_press_event(event6.key, guiEvent=event6)
# Test that it displays the overlay world coordinates
string_world5 = ax._display_world_coords(0.523412, 0.518311)
assert string_world5 == '267.652 -28\xb046\'23" (world, overlay 3)'
@ignore_matplotlibrc
def test_cube_coords(self, tmpdir):
wcs = WCS(self.cube_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs, slices=('y', 50, 'x'))
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test.png').strpath)
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == '2563 3h26m52.0s (world)'
# Test pixel coordinates
event1 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.key_press_event(event1.key, guiEvent=event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from unittest.mock import patch
import pytest
import matplotlib.pyplot as plt
from ..core import WCSAxes
from .... import units as u
from ....tests.image_tests import ignore_matplotlibrc
@ignore_matplotlibrc
def test_getaxislabel():
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
ax.coords[0].set_axislabel("X")
ax.coords[1].set_axislabel("Y")
assert ax.coords[0].get_axislabel() == "X"
assert ax.coords[1].get_axislabel() == "Y"
@pytest.fixture
def ax():
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
fig.add_axes(ax)
return ax
def assert_label_draw(ax, x_label, y_label):
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
with patch.object(ax.coords[0].axislabels, 'set_position') as pos1:
with patch.object(ax.coords[1].axislabels, 'set_position') as pos2:
ax.figure.canvas.draw()
assert pos1.call_count == x_label
assert pos2.call_count == y_label
@ignore_matplotlibrc
def test_label_visibility_rules_default(ax):
assert_label_draw(ax, True, True)
@ignore_matplotlibrc
def test_label_visibility_rules_label(ax):
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.one)
assert_label_draw(ax, False, False)
@ignore_matplotlibrc
def test_label_visibility_rules_ticks(ax):
ax.coords[0].set_axislabel_visibility_rule('ticks')
ax.coords[1].set_axislabel_visibility_rule('ticks')
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.one)
assert_label_draw(ax, True, False)
@ignore_matplotlibrc
def test_label_visibility_rules_always(ax):
ax.coords[0].set_axislabel_visibility_rule('always')
ax.coords[1].set_axislabel_visibility_rule('always')
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999]*u.one)
assert_label_draw(ax, True, True)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pytest
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Rectangle
from matplotlib import rc_context
from .... import units as u
from ....io import fits
from ....wcs import WCS
from ....coordinates import SkyCoord
from ..patches import SphericalCircle
from .. import WCSAxes
from . import datasets
from ....tests.image_tests import IMAGE_REFERENCE_DIR
from ..frame import EllipticalFrame
class BaseImageTests:
@classmethod
def setup_class(cls):
cls._data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
msx_header = os.path.join(cls._data_dir, 'msx_header')
cls.msx_header = fits.Header.fromtextfile(msx_header)
rosat_header = os.path.join(cls._data_dir, 'rosat_header')
cls.rosat_header = fits.Header.fromtextfile(rosat_header)
twoMASS_k_header = os.path.join(cls._data_dir, '2MASS_k_header')
cls.twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
cube_header = os.path.join(cls._data_dir, 'cube_header')
cls.cube_header = fits.Header.fromtextfile(cube_header)
slice_header = os.path.join(cls._data_dir, 'slice_header')
cls.slice_header = fits.Header.fromtextfile(slice_header)
class TestBasic(BaseImageTests):
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='image_plot.png',
tolerance=0, style={})
def test_image_plot(self):
# Test for plotting image and also setting values of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect='equal')
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0., 0.20] * u.degree, size=5, width=1)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=1.5)
@pytest.mark.parametrize('axisbelow', [True, False, 'line'])
def test_axisbelow(self, axisbelow):
# Test that tick marks, labels, and gridlines are drawn with the
# correct zorder controlled by the axisbelow property.
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect='equal')
ax.set_axisbelow(axisbelow)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0., 0.20] * u.degree, size=5, width=1)
ax.grid()
# Add an image (default zorder=0).
ax.imshow(np.zeros((64, 64)))
# Add a patch (default zorder=1).
r = Rectangle((30., 50.), 60., 50., facecolor='green', edgecolor='red')
ax.add_patch(r)
# Add a line (default zorder=2).
ax.plot([32, 128], [32, 128], linewidth=10)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='contour_overlay.png',
tolerance=0, style={})
def test_contour_overlay(self):
# Test for overlaying contours on images
hdu_msx = datasets.fetch_msx_hdu()
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contour(hdu_msx.data, transform=ax.get_transform(wcs_msx),
colors='orange', levels=[2.5e-5, 5e-5, 1.e-4])
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0., 720.)
ax.set_ylim(0., 720.)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='overlay_features_image.png',
tolerance=0, style={})
def test_overlay_features_image(self):
# Test for overlaying grid, changing format of ticks, setting spacing
# and number of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.25, 0.25, 0.65, 0.65],
projection=WCS(self.msx_header), aspect='equal')
# Change the format of the ticks
ax.coords[0].set_major_formatter('dd:mm:ss')
ax.coords[1].set_major_formatter('dd:mm:ss.ssss')
# Overlay grid on image
ax.grid(color='red', alpha=1.0, lw=1, linestyle='dashed')
# Set the spacing of ticks on the 'glon' axis to 4 arcsec
ax.coords['glon'].set_ticks(spacing=4 * u.arcsec, size=5, width=1)
# Set the number of ticks on the 'glat' axis to 9
ax.coords['glat'].set_ticks(number=9, size=5, width=1)
# Set labels on axes
ax.coords['glon'].set_axislabel('Galactic Longitude', minpad=1.6)
ax.coords['glat'].set_axislabel('Galactic Latitude', minpad=-0.75)
# Change the frame linewidth and color
ax.coords.frame.set_color('red')
ax.coords.frame.set_linewidth(2)
assert ax.coords.frame.get_color() == 'red'
assert ax.coords.frame.get_linewidth() == 2
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='curvlinear_grid_patches_image.png',
tolerance=0, style={})
def test_curvilinear_grid_patches_image(self):
# Overlay curvilinear grid and patches on image
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.rosat_header), aspect='equal')
ax.set_xlim(-0.5, 479.5)
ax.set_ylim(-0.5, 239.5)
ax.grid(color='black', alpha=1.0, lw=1, linestyle='dashed')
p = Circle((300, 100), radius=40, ec='yellow', fc='none')
ax.add_patch(p)
p = Circle((30., 20.), radius=20., ec='orange', fc='none',
transform=ax.get_transform('world'))
ax.add_patch(p)
p = Circle((60., 50.), radius=20., ec='red', fc='none',
transform=ax.get_transform('fk5'))
ax.add_patch(p)
p = Circle((40., 60.), radius=20., ec='green', fc='none',
transform=ax.get_transform('galactic'))
ax.add_patch(p)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='cube_slice_image.png',
tolerance=0, style={})
def test_cube_slice_image(self):
# Test for cube slicing
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_axislabel('Velocity m/s')
ax.coords[1].set_ticks(spacing=0.2 * u.deg, width=1,
exclude_overlapping=True)
ax.coords[2].set_ticks(spacing=400 * u.m / u.s, width=1,
exclude_overlapping=True)
ax.coords[1].grid(grid_type='contours', color='red', linestyle='solid')
ax.coords[2].grid(grid_type='contours', color='red', linestyle='solid')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='cube_slice_image_lonlat.png',
tolerance=0, style={})
def test_cube_slice_image_lonlat(self):
# Test for cube slicing. Here we test with longitude and latitude since
# there is some longitude-specific code in _update_grid_contour.
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=('x', 'y', 50), aspect='equal')
ax.set_xlim(-0.5, 106.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].grid(grid_type='contours', color='blue', linestyle='solid')
ax.coords[1].grid(grid_type='contours', color='red', linestyle='solid')
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_plot_coord(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
ax.plot_coord(c, 'o')
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_plot_line(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord([266, 266.8] * u.deg, [-29, -28.9] * u.deg)
ax.plot_coord(c)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='changed_axis_units.png',
tolerance=0, style={})
def test_changed_axis_units(self):
# Test to see if changing the units of axis works
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_major_formatter('x.xx')
ax.coords[2].set_format_unit(u.km / u.s)
ax.coords[2].set_axislabel('Velocity km/s')
ax.coords[1].set_ticks(width=1, exclude_overlapping=True)
ax.coords[2].set_ticks(width=1, exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='minor_ticks_image.png',
tolerance=0, style={})
def test_minor_ticks(self):
# Test for drawing minor ticks
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_ticks(exclude_overlapping=True)
ax.coords[1].set_ticks(exclude_overlapping=True)
ax.coords[2].display_minor_ticks(True)
ax.coords[1].display_minor_ticks(True)
ax.coords[2].set_minor_frequency(3)
ax.coords[1].set_minor_frequency(10)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='ticks_labels.png',
tolerance=0, style={})
def test_ticks_labels(self):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.coords[0].set_ticks(size=10, color='blue', alpha=0.2, width=1)
ax.coords[1].set_ticks(size=20, color='red', alpha=0.9, width=1)
ax.coords[0].set_ticks_position('all')
ax.coords[1].set_ticks_position('all')
ax.coords[0].set_axislabel('X-axis', size=20)
ax.coords[1].set_axislabel('Y-axis', color='green', size=25,
weight='regular', style='normal',
family='cmtt10')
ax.coords[0].set_axislabel_position('t')
ax.coords[1].set_axislabel_position('r')
ax.coords[0].set_ticklabel(color='purple', size=15, alpha=1,
weight='light', style='normal',
family='cmss10')
ax.coords[1].set_ticklabel(color='black', size=18, alpha=0.9,
weight='bold', family='cmr10')
ax.coords[0].set_ticklabel_position('all')
ax.coords[1].set_ticklabel_position('r')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='rcparams.png',
tolerance=0, style={})
def test_rcparams(self):
# Test default style (matplotlib.rcParams) for ticks and gridlines
with rc_context({
'xtick.color': 'red',
'xtick.major.size': 20,
'xtick.major.width': 2,
'grid.color': 'blue',
'grid.linestyle': ':',
'grid.linewidth': 1,
'grid.alpha': 0.5}):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.grid()
ax.coords[0].set_ticks(exclude_overlapping=True)
ax.coords[1].set_ticks(exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='tick_angles.png',
tolerance=0, style={})
def test_tick_angles(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels. Addresses #45, #46.
w = WCS()
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = 'ICRS'
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color='gray', alpha=0.5, linestyle='solid')
ax.coords['ra'].set_ticks(color='red', size=20)
ax.coords['dec'].set_ticks(color='red', size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='tick_angles_non_square_axes.png',
tolerance=0, style={})
def test_tick_angles_non_square_axes(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels, and the axes are
# non-square.
w = WCS()
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = 'ICRS'
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(6, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color='gray', alpha=0.5, linestyle='solid')
ax.coords['ra'].set_ticks(color='red', size=20)
ax.coords['dec'].set_ticks(color='red', size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='set_coord_type.png',
tolerance=0, style={})
def test_set_coord_type(self):
# Test for setting coord_type
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6],
projection=WCS(self.msx_header),
aspect='equal')
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_coord_type('scalar')
ax.coords[1].set_coord_type('scalar')
ax.coords[0].set_major_formatter('x.xxx')
ax.coords[1].set_major_formatter('x.xxx')
ax.coords[0].set_ticks(exclude_overlapping=True)
ax.coords[1].set_ticks(exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='test_ticks_regression_1.png',
tolerance=0, style={})
def test_ticks_regression(self):
# Regression test for a bug that caused ticks aligned exactly with a
# sampled frame point to not appear. This also checks that tick labels
# don't get added more than once, and that no error occurs when e.g.
# the top part of the frame is all at the same coordinate as one of the
# potential ticks (which causes the tick angle calculation to return
# NaN).
wcs = WCS(self.slice_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5],
projection=wcs, aspect='auto')
limits = wcs.wcs_world2pix([0, 0], [35e3, 80e3], 0)[1]
ax.set_ylim(*limits)
ax.coords[0].set_ticks(spacing=0.002 * u.deg)
ax.coords[1].set_ticks(spacing=5 * u.km / u.s)
ax.coords[0].set_ticklabel(alpha=0.5) # to see multiple labels
ax.coords[1].set_ticklabel(alpha=0.5)
ax.coords[0].set_ticklabel_position('all')
ax.coords[1].set_ticklabel_position('all')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
filename='test_axislabels_regression.png',
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_axislabels_regression(self):
# Regression test for a bug that meant that if tick labels were made
# invisible with ``set_visible(False)``, they were still added to the
# list of bounding boxes for tick labels, but with default values of 0
# to 1, which caused issues.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect='auto')
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
ax.coords[1].set_axislabel_visibility_rule('always')
ax.coords[1].ticklabels.set_visible(False)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_noncelestial_angular(self, tmpdir):
        # Regression test for a bug that occurred when passing a WCS that had
        # angular axes and using set_coord_type to set the coordinates to
        # longitude/latitude, but where the WCS wasn't recognized as
        # celestial: in that case the WCS units are not converted to deg, so
        # we can't assume that transform will always return degrees.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['solar-x', 'solar-y']
wcs.wcs.cunit = ['arcsec', 'arcsec']
fig = plt.figure(figsize=(3, 3))
ax = fig.add_subplot(1, 1, 1, projection=wcs)
ax.imshow(np.zeros([1024, 1024]), origin='lower')
ax.coords[0].set_coord_type('longitude', coord_wrap=180)
ax.coords[1].set_coord_type('latitude')
ax.coords[0].set_major_formatter('s.s')
ax.coords[1].set_major_formatter('s.s')
ax.grid(color='white', ls='solid')
# Force drawing (needed for format_coord)
fig.savefig(tmpdir.join('nothing').strpath)
# TODO: the formatted string should show units
assert ax.format_coord(512, 512) == "513.0 513.0 (world)"
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_patches_distortion(self, tmpdir):
# Check how patches get distorted (and make sure that scatter markers
# and SphericalCircle don't)
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect='equal')
# Pixel coordinates
r = Rectangle((30., 50.), 60., 50., edgecolor='green', facecolor='none')
ax.add_patch(r)
# FK5 coordinates
r = Rectangle((266.4, -28.9), 0.3, 0.3, edgecolor='cyan', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(r)
# FK5 coordinates
c = Circle((266.4, -29.1), 0.15, edgecolor='magenta', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(c)
# Pixel coordinates
ax.scatter([40, 100, 130], [30, 130, 60], s=100, edgecolor='red', facecolor=(1, 0, 0, 0.5))
# World coordinates (should not be distorted)
ax.scatter(266.78238, -28.769255, transform=ax.get_transform('fk5'), s=300,
edgecolor='red', facecolor='none')
# World coordinates (should not be distorted)
r = SphericalCircle((266.4 * u.deg, -29.1 * u.deg), 0.15 * u.degree,
edgecolor='purple', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(r)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_elliptical_frame(self):
# Regression test for a bug (astropy/astropy#6063) that caused labels to
# be incorrectly simplified.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(5, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, frame_class=EllipticalFrame)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_hms_labels(self):
        # This tests the appearance of the hms superscripts in tick labels
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.3, 0.2, 0.65, 0.6],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={'text.usetex': True})
def test_latex_labels(self):
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.3, 0.2, 0.65, 0.6],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from ...utils.data import get_pkg_data_contents, get_pkg_data_filename
from ...time import Time
from ... import units as u
from ..wcs import WCS
from ..utils import (proj_plane_pixel_scales, is_proj_plane_distorted,
non_celestial_pixel_scales, wcs_to_celestial_frame,
celestial_frame_to_wcs, skycoord_to_pixel,
pixel_to_skycoord, custom_wcs_to_frame_mappings,
custom_frame_to_wcs_mappings, add_stokes_axis_to_wcs)
def test_wcs_dropping():
wcs = WCS(naxis=4)
wcs.wcs.pc = np.zeros([4, 4])
np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))
pc = wcs.wcs.pc # for later use below
dropped = wcs.dropaxis(0)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4]))
dropped = wcs.dropaxis(1)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4]))
dropped = wcs.dropaxis(2)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4]))
dropped = wcs.dropaxis(3)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3]))
wcs = WCS(naxis=4)
wcs.wcs.cd = pc
dropped = wcs.dropaxis(0)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4]))
dropped = wcs.dropaxis(1)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4]))
dropped = wcs.dropaxis(2)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4]))
dropped = wcs.dropaxis(3)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3]))
def test_wcs_swapping():
wcs = WCS(naxis=4)
wcs.wcs.pc = np.zeros([4, 4])
np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))
pc = wcs.wcs.pc # for later use below
swapped = wcs.swapaxes(0, 1)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4]))
swapped = wcs.swapaxes(0, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1]))
swapped = wcs.swapaxes(2, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3]))
wcs = WCS(naxis=4)
wcs.wcs.cd = pc
swapped = wcs.swapaxes(0, 1)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4]))
swapped = wcs.swapaxes(0, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1]))
swapped = wcs.swapaxes(2, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3]))
@pytest.mark.parametrize('ndim', (2, 3))
def test_add_stokes(ndim):
wcs = WCS(naxis=ndim)
for ii in range(ndim + 1):
outwcs = add_stokes_axis_to_wcs(wcs, ii)
assert outwcs.wcs.naxis == ndim + 1
assert outwcs.wcs.ctype[ii] == 'STOKES'
assert outwcs.wcs.cname[ii] == 'STOKES'
def test_slice():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
mywcs._naxis = [1000, 500]
slice_wcs = mywcs.slice([slice(1, None), slice(0, None)])
assert np.all(slice_wcs.wcs.crpix == np.array([1, 0]))
assert slice_wcs._naxis == [1000, 499]
slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)])
assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
assert slice_wcs._naxis == [250, 250]
slice_wcs = mywcs.slice([slice(None, None, 2), slice(0, None, 2)])
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.2]))
assert slice_wcs._naxis == [500, 250]
# Non-integral values do not alter the naxis attribute
slice_wcs = mywcs.slice([slice(50.), slice(20.)])
assert slice_wcs._naxis == [1000, 500]
slice_wcs = mywcs.slice([slice(50.), slice(20)])
assert slice_wcs._naxis == [20, 500]
slice_wcs = mywcs.slice([slice(50), slice(20.5)])
assert slice_wcs._naxis == [1000, 50]
def test_slice_getitem():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
slice_wcs = mywcs[1::2, 0::4]
assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
mywcs.wcs.crpix = [2, 2]
slice_wcs = mywcs[1::2, 0::4]
assert np.all(slice_wcs.wcs.crpix == np.array([0.875, 0.75]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
# Default: numpy order
slice_wcs = mywcs[1::2]
assert np.all(slice_wcs.wcs.crpix == np.array([2, 0.75]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.1, 0.2]))
def test_slice_fitsorder():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
slice_wcs = mywcs.slice([slice(1, None), slice(0, None)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0, 1]))
slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 0.625]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.4]))
slice_wcs = mywcs.slice([slice(1, None, 2)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 1]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.1]))
def test_invalid_slice():
mywcs = WCS(naxis=2)
with pytest.raises(ValueError) as exc:
mywcs[0]
assert exc.value.args[0] == ("Cannot downsample a WCS with indexing. Use "
"wcs.sub or wcs.dropaxis if you want to remove "
"axes.")
with pytest.raises(ValueError) as exc:
mywcs[0, ::2]
assert exc.value.args[0] == ("Cannot downsample a WCS with indexing. Use "
"wcs.sub or wcs.dropaxis if you want to remove "
"axes.")
def test_axis_names():
mywcs = WCS(naxis=4)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT-LSR', 'STOKES']
assert mywcs.axis_type_names == ['RA', 'DEC', 'VOPT', 'STOKES']
mywcs.wcs.cname = ['RA', 'DEC', 'VOPT', 'STOKES']
assert mywcs.axis_type_names == ['RA', 'DEC', 'VOPT', 'STOKES']
def test_celestial():
mywcs = WCS(naxis=4)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT', 'STOKES']
cel = mywcs.celestial
assert tuple(cel.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert cel.axis_type_names == ['RA', 'DEC']
def test_wcs_to_celestial_frame():
# Import astropy.coordinates here to avoid circular imports
from ...coordinates.builtin_frames import ICRS, ITRS, FK5, FK4, Galactic
mywcs = WCS(naxis=2)
with pytest.raises(ValueError) as exc:
assert wcs_to_celestial_frame(mywcs) is None
assert exc.value.args[0] == "Could not determine celestial frame corresponding to the specified WCS object"
mywcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
with pytest.raises(ValueError):
assert wcs_to_celestial_frame(mywcs) is None
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
mywcs.wcs.equinox = 1987.
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, FK5)
assert frame.equinox == Time(1987., format='jyear')
mywcs.wcs.equinox = 1982
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, FK4)
assert frame.equinox == Time(1982., format='byear')
mywcs.wcs.equinox = np.nan
mywcs.wcs.ctype = ['GLON-SIN', 'GLAT-SIN']
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, Galactic)
mywcs.wcs.ctype = ['TLON-CAR', 'TLAT-CAR']
mywcs.wcs.dateobs = '2017-08-17T12:41:04.430'
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ITRS)
assert frame.obstime == Time('2017-08-17T12:41:04.430')
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
mywcs.wcs.radesys = 'ICRS'
for equinox in [np.nan, 1987, 1982]:
mywcs.wcs.equinox = equinox
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
# Flipped order
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['DEC--TAN', 'RA---TAN']
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
# More than two dimensions
mywcs = WCS(naxis=3)
mywcs.wcs.ctype = ['DEC--TAN', 'VELOCITY', 'RA---TAN']
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
def test_wcs_to_celestial_frame_extend():
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
with pytest.raises(ValueError):
wcs_to_celestial_frame(mywcs)
class OffsetFrame:
pass
def identify_offset(wcs):
if wcs.wcs.ctype[0].endswith('OFFSET') and wcs.wcs.ctype[1].endswith('OFFSET'):
return OffsetFrame()
with custom_wcs_to_frame_mappings(identify_offset):
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, OffsetFrame)
# Check that things are back to normal after the context manager
with pytest.raises(ValueError):
wcs_to_celestial_frame(mywcs)
def test_celestial_frame_to_wcs():
# Import astropy.coordinates here to avoid circular imports
from ...coordinates import ICRS, ITRS, FK5, FK4, FK4NoETerms, Galactic, BaseCoordinateFrame
class FakeFrame(BaseCoordinateFrame):
pass
frame = FakeFrame()
with pytest.raises(ValueError) as exc:
celestial_frame_to_wcs(frame)
assert exc.value.args[0] == ("Could not determine WCS corresponding to "
"the specified coordinate frame.")
frame = ICRS()
mywcs = celestial_frame_to_wcs(frame)
mywcs.wcs.set()
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'ICRS'
assert np.isnan(mywcs.wcs.equinox)
assert mywcs.wcs.lonpole == 180
assert mywcs.wcs.latpole == 0
frame = FK5(equinox='J1987')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK5'
assert mywcs.wcs.equinox == 1987.
frame = FK4(equinox='B1982')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK4'
assert mywcs.wcs.equinox == 1982.
frame = FK4NoETerms(equinox='B1982')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK4-NO-E'
assert mywcs.wcs.equinox == 1982.
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('GLON-TAN', 'GLAT-TAN')
assert mywcs.wcs.radesys == ''
assert np.isnan(mywcs.wcs.equinox)
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('GLON-CAR', 'GLAT-CAR')
assert mywcs.wcs.radesys == ''
assert np.isnan(mywcs.wcs.equinox)
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
mywcs.wcs.crval = [100, -30]
mywcs.wcs.set()
assert_allclose((mywcs.wcs.lonpole, mywcs.wcs.latpole), (180, 60))
frame = ITRS(obstime=Time('2017-08-17T12:41:04.43'))
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('TLON-CAR', 'TLAT-CAR')
assert mywcs.wcs.radesys == 'ITRS'
assert mywcs.wcs.dateobs == '2017-08-17T12:41:04.430'
frame = ITRS()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('TLON-CAR', 'TLAT-CAR')
assert mywcs.wcs.radesys == 'ITRS'
assert mywcs.wcs.dateobs == Time('J2000').utc.isot
def test_celestial_frame_to_wcs_extend():
class OffsetFrame:
pass
frame = OffsetFrame()
with pytest.raises(ValueError):
celestial_frame_to_wcs(frame)
def identify_offset(frame, projection=None):
if isinstance(frame, OffsetFrame):
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
return wcs
with custom_frame_to_wcs_mappings(identify_offset):
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('XOFFSET', 'YOFFSET')
# Check that things are back to normal after the context manager
with pytest.raises(ValueError):
celestial_frame_to_wcs(frame)
def test_pixscale_nodrop():
mywcs = WCS(naxis=2)
mywcs.wcs.cdelt = [0.1, 0.2]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
mywcs.wcs.cdelt = [-0.1, 0.2]
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
def test_pixscale_withdrop():
mywcs = WCS(naxis=3)
mywcs.wcs.cdelt = [0.1, 0.2, 1]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT']
assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2))
mywcs.wcs.cdelt = [-0.1, 0.2, 1]
assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2))
def test_pixscale_cd():
mywcs = WCS(naxis=2)
mywcs.wcs.cd = [[-0.1, 0], [0, 0.2]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
@pytest.mark.parametrize('angle',
(30, 45, 60, 75))
def test_pixscale_cd_rotated(angle):
mywcs = WCS(naxis=2)
rho = np.radians(angle)
scale = 0.1
mywcs.wcs.cd = [[scale * np.cos(rho), -scale * np.sin(rho)],
[scale * np.sin(rho), scale * np.cos(rho)]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1))
@pytest.mark.parametrize('angle',
(30, 45, 60, 75))
def test_pixscale_pc_rotated(angle):
mywcs = WCS(naxis=2)
rho = np.radians(angle)
scale = 0.1
mywcs.wcs.cdelt = [-scale, scale]
mywcs.wcs.pc = [[np.cos(rho), -np.sin(rho)],
[np.sin(rho), np.cos(rho)]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1))
@pytest.mark.parametrize(('cdelt', 'pc', 'pccd'),
(([0.1, 0.2], np.eye(2), np.diag([0.1, 0.2])),
([0.1, 0.2, 0.3], np.eye(3), np.diag([0.1, 0.2, 0.3])),
([1, 1, 1], np.diag([0.1, 0.2, 0.3]), np.diag([0.1, 0.2, 0.3]))))
def test_pixel_scale_matrix(cdelt, pc, pccd):
    mywcs = WCS(naxis=len(cdelt))
mywcs.wcs.cdelt = cdelt
mywcs.wcs.pc = pc
assert_almost_equal(mywcs.pixel_scale_matrix, pccd)
@pytest.mark.parametrize(('ctype', 'cel'),
((['RA---TAN', 'DEC--TAN'], True),
(['RA---TAN', 'DEC--TAN', 'FREQ'], False),
(['RA---TAN', 'FREQ'], False),))
def test_is_celestial(ctype, cel):
mywcs = WCS(naxis=len(ctype))
mywcs.wcs.ctype = ctype
assert mywcs.is_celestial == cel
@pytest.mark.parametrize(('ctype', 'cel'),
((['RA---TAN', 'DEC--TAN'], True),
(['RA---TAN', 'DEC--TAN', 'FREQ'], True),
(['RA---TAN', 'FREQ'], False),))
def test_has_celestial(ctype, cel):
mywcs = WCS(naxis=len(ctype))
mywcs.wcs.ctype = ctype
assert mywcs.has_celestial == cel
@pytest.mark.parametrize(('cdelt', 'pc', 'cd'),
((np.array([0.1, 0.2]), np.eye(2), np.eye(2)),
(np.array([1, 1]), np.diag([0.1, 0.2]), np.eye(2)),
(np.array([0.1, 0.2]), np.eye(2), None),
(np.array([0.1, 0.2]), None, np.eye(2)),
))
def test_noncelestial_scale(cdelt, pc, cd):
mywcs = WCS(naxis=2)
if cd is not None:
mywcs.wcs.cd = cd
if pc is not None:
mywcs.wcs.pc = pc
mywcs.wcs.cdelt = cdelt
mywcs.wcs.ctype = ['RA---TAN', 'FREQ']
ps = non_celestial_pixel_scales(mywcs)
assert_almost_equal(ps.to_value(u.deg), np.array([0.1, 0.2]))
@pytest.mark.parametrize('mode', ['all', 'wcs'])
def test_skycoord_to_pixel(mode):
# Import astropy.coordinates here to avoid circular imports
from ...coordinates import SkyCoord
header = get_pkg_data_contents('maps/1904-66_TAN.hdr', encoding='binary')
wcs = WCS(header)
ref = SkyCoord(0.1 * u.deg, -89. * u.deg, frame='icrs')
xp, yp = skycoord_to_pixel(ref, wcs, mode=mode)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to('icrs')
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
# Make sure you can specify a different class using ``cls`` keyword
class SkyCoord2(SkyCoord):
pass
new2 = pixel_to_skycoord(xp, yp, wcs, mode=mode,
cls=SkyCoord2).transform_to('icrs')
assert new2.__class__ is SkyCoord2
assert_allclose(new2.ra.degree, ref.ra.degree)
assert_allclose(new2.dec.degree, ref.dec.degree)
def test_is_proj_plane_distorted():
# non-orthogonal CD:
wcs = WCS(naxis=2)
wcs.wcs.cd = [[-0.1, 0], [0, 0.2]]
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    assert is_proj_plane_distorted(wcs)
    # almost orthogonal CD:
    wcs.wcs.cd = [[0.1 + 2.0e-7, 1.7e-7], [1.2e-7, 0.1 - 1.3e-7]]
    assert not is_proj_plane_distorted(wcs)
    # real case:
    header = get_pkg_data_filename('data/sip.fits')
    wcs = WCS(header)
    assert is_proj_plane_distorted(wcs)
@pytest.mark.parametrize('mode', ['all', 'wcs'])
def test_skycoord_to_pixel_distortions(mode):
# Import astropy.coordinates here to avoid circular imports
from ...coordinates import SkyCoord
header = get_pkg_data_filename('data/sip.fits')
wcs = WCS(header)
ref = SkyCoord(202.50 * u.deg, 47.19 * u.deg, frame='icrs')
xp, yp = skycoord_to_pixel(ref, wcs, mode=mode)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to('icrs')
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
|
657c2465cfb5661c4484a0cf832be52602ee4cdad5290a730801751f1c7a709b | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles the CDS string format for units
"""
import operator
import os
import re
from .base import Base
from . import core, utils
from ..utils import is_effectively_unity
from ...utils import classproperty
from ...utils.misc import did_you_mean
# TODO: Support logarithmic units using bracketed syntax
class CDS(Base):
"""
Support the `Centre de Données astronomiques de Strasbourg
<http://cds.u-strasbg.fr/>`_ `Standards for Astronomical
Catalogues 2.0 <http://cds.u-strasbg.fr/doc/catstd-3.2.htx>`_
format, and the `complete set of supported units
<http://vizier.u-strasbg.fr/cgi-bin/Unit>`_. This format is used
by VOTable up to version 1.2.
"""
_tokens = (
'PRODUCT',
'DIVISION',
'OPEN_PAREN',
'CLOSE_PAREN',
'X',
'SIGN',
'UINT',
'UFLOAT',
'UNIT'
)
@classproperty(lazy=True)
def _units(cls):
return cls._generate_unit_names()
@classproperty(lazy=True)
def _parser(cls):
return cls._make_parser()
@classproperty(lazy=True)
def _lexer(cls):
return cls._make_lexer()
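    # The three lazy classproperties above build the unit-name table, the
    # parser and the lexer once, on first access, then cache the result
    # on the class.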
@staticmethod
def _generate_unit_names():
from .. import cds
from ... import units as u
names = {}
for key, val in cds.__dict__.items():
if isinstance(val, u.UnitBase):
names[key] = val
return names
@classmethod
def _make_lexer(cls):
from ...extern.ply import lex
tokens = cls._tokens
t_PRODUCT = r'\.'
t_DIVISION = r'/'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'((\d+\.?\d+)|(\.\d+))([eE][+-]?\d+)?'
if not re.search(r'[eE\.]', t.value):
t.type = 'UINT'
t.value = int(t.value)
else:
t.value = float(t.value)
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+-](?=\d)'
t.value = float(t.value + '1')
return t
def t_X(t): # multiplication for factor in front of unit
r'[x×]'
return t
def t_UNIT(t):
r'\%|°|\\h|((?!\d)\w)+'
t.value = cls._get_unit(t)
return t
t_ignore = ''
# Error handling rule
def t_error(t):
raise ValueError(
"Invalid character at col {0}".format(t.lexpos))
lexer_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'cds_lextab.py'))
lexer = lex.lex(optimize=True, lextab='cds_lextab',
outputdir=os.path.dirname(__file__),
reflags=int(re.UNICODE))
if not lexer_exists:
cls._add_tab_header('cds_lextab')
return lexer
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the `Standards
for Astronomical Catalogues 2.0
<http://cds.u-strasbg.fr/doc/catstd-3.2.htx>`_, which is not
        terribly precise. The exact grammar here is based on the
YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
"""
from ...extern.ply import yacc
tokens = cls._tokens
def p_main(p):
'''
main : factor combined_units
| combined_units
| factor
'''
from ..core import Unit
if len(p) == 3:
p[0] = Unit(p[1] * p[2])
else:
p[0] = Unit(p[1])
def p_combined_units(p):
'''
combined_units : product_of_units
| division_of_units
'''
p[0] = p[1]
def p_product_of_units(p):
'''
product_of_units : unit_expression PRODUCT combined_units
| unit_expression
'''
if len(p) == 4:
p[0] = p[1] * p[3]
else:
p[0] = p[1]
def p_division_of_units(p):
'''
division_of_units : DIVISION unit_expression
| unit_expression DIVISION combined_units
'''
if len(p) == 3:
p[0] = p[2] ** -1
else:
p[0] = p[1] / p[3]
def p_unit_expression(p):
'''
unit_expression : unit_with_power
| OPEN_PAREN combined_units CLOSE_PAREN
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_factor(p):
'''
factor : signed_float X UINT signed_int
| UINT X UINT signed_int
| UINT signed_int
| UINT
| signed_float
'''
if len(p) == 5:
if p[3] != 10:
raise ValueError(
"Only base ten exponents are allowed in CDS")
p[0] = p[1] * 10.0 ** p[4]
elif len(p) == 3:
if p[1] != 10:
raise ValueError(
"Only base ten exponents are allowed in CDS")
p[0] = 10.0 ** p[2]
elif len(p) == 2:
p[0] = p[1]
def p_unit_with_power(p):
'''
unit_with_power : UNIT numeric_power
| UNIT
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[1] ** p[2]
def p_numeric_power(p):
'''
numeric_power : sign UINT
'''
p[0] = p[1] * p[2]
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_signed_int(p):
'''
signed_int : SIGN UINT
'''
p[0] = p[1] * p[2]
def p_signed_float(p):
'''
signed_float : sign UINT
| sign UFLOAT
'''
p[0] = p[1] * p[2]
def p_error(p):
raise ValueError()
parser_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'cds_parsetab.py'))
parser = yacc.yacc(debug=False, tabmodule='cds_parsetab',
outputdir=os.path.dirname(__file__),
write_tables=True)
if not parser_exists:
cls._add_tab_header('cds_parsetab')
return parser
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
raise ValueError(
"At col {0}, {1}".format(
t.lexpos, str(e)))
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{0}' not supported by the CDS SAC "
"standard. {1}".format(
unit, did_you_mean(
unit, cls._units)))
else:
raise ValueError()
return cls._units[unit]
@classmethod
def parse(cls, s, debug=False):
        if not isinstance(s, str):
            s = s.decode('ascii')
        if ' ' in s:
            raise ValueError('CDS unit must not contain whitespace')
# This is a short circuit for the case where the string
# is just a single unit name
try:
return cls._parse_unit(s, detailed_exception=False)
except ValueError:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if str(e):
raise ValueError(str(e))
else:
raise ValueError("Syntax error")
@staticmethod
def _get_unit_name(unit):
return unit.get_format_name('cds')
@classmethod
def _format_unit_list(cls, units):
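        # Illustrative: [(u.km, 1), (u.s, -2)] -> 'km.s-2'; CDS joins
        # factors with '.' and writes powers as bare integers.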
out = []
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
out.append('{0}{1}'.format(
cls._get_unit_name(base), int(power)))
return '.'.join(out)
@classmethod
def to_string(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
            if (unit.physical_type == 'dimensionless' and
                    is_effectively_unity(unit.scale * 100.)):
return '%'
if unit.scale == 1:
s = ''
else:
m, e = utils.split_mantissa_exponent(unit.scale)
parts = []
if m not in ('', '1'):
parts.append(m)
if e:
if not e.startswith('-'):
e = "+" + e
parts.append('10{0}'.format(e))
s = 'x'.join(parts)
pairs = list(zip(unit.bases, unit.powers))
if len(pairs) > 0:
pairs.sort(key=operator.itemgetter(1), reverse=True)
s += cls._format_unit_list(pairs)
elif isinstance(unit, core.NamedUnit):
s = cls._get_unit_name(unit)
return s
|
918a56113e88faba7b44f167962dd2a6cd977d1a2f6ff1d3d09c86b71f0a0a5f | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to this file.
# ogip_lextab.py. This file automatically created by PLY (version 3.10). Don't edit!
_tabversion = '3.10'
_lextokens = set(('CLOSE_PAREN', 'DIVISION', 'LIT10', 'OPEN_PAREN', 'SIGN', 'STAR', 'STARSTAR', 'UFLOAT', 'UINT', 'UNIT', 'UNKNOWN', 'WHITESPACE'))
_lexreflags = 64
_lexliterals = ''
_lexstateinfo = {'INITIAL': 'inclusive'}
_lexstatere = {'INITIAL': [('(?P<t_UFLOAT>(((\\d+\\.?\\d*)|(\\.\\d+))([eE][+-]?\\d+))|(((\\d+\\.\\d*)|(\\.\\d+))([eE][+-]?\\d+)?))|(?P<t_UINT>\\d+)|(?P<t_SIGN>[+-](?=\\d))|(?P<t_X>[x×])|(?P<t_LIT10>10)|(?P<t_UNKNOWN>[Uu][Nn][Kk][Nn][Oo][Ww][Nn])|(?P<t_UNIT>[a-zA-Z][a-zA-Z_]*)|(?P<t_WHITESPACE>[ \t]+)|(?P<t_STARSTAR>\\*\\*)|(?P<t_STAR>\\*)|(?P<t_CLOSE_PAREN>\\))|(?P<t_OPEN_PAREN>\\()|(?P<t_DIVISION>/)', [None, ('t_UFLOAT', 'UFLOAT'), None, None, None, None, None, None, None, None, None, None, ('t_UINT', 'UINT'), ('t_SIGN', 'SIGN'), ('t_X', 'X'), ('t_LIT10', 'LIT10'), ('t_UNKNOWN', 'UNKNOWN'), ('t_UNIT', 'UNIT'), (None, 'WHITESPACE'), (None, 'STARSTAR'), (None, 'STAR'), (None, 'CLOSE_PAREN'), (None, 'OPEN_PAREN'), (None, 'DIVISION')])]}
_lexstateignore = {'INITIAL': ''}
_lexstateerrorf = {'INITIAL': 't_error'}
_lexstateeoff = {}
|
f60f337cf34578c77f95812195c8455a49dd197ed540510b61e3e8de9b4be46a | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to this file.
# generic_parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'DOUBLE_STAR STAR PERIOD SOLIDUS CARET OPEN_PAREN CLOSE_PAREN FUNCNAME UNIT SIGN UINT UFLOAT\n main : product_of_units\n | factor product_of_units\n | factor product product_of_units\n | division_product_of_units\n | factor division_product_of_units\n | factor product division_product_of_units\n | inverse_unit\n | factor inverse_unit\n | factor product inverse_unit\n | factor\n \n division_product_of_units : division_product_of_units division product_of_units\n | product_of_units\n \n inverse_unit : division unit_expression\n \n factor : factor_fits\n | factor_float\n | factor_int\n \n factor_float : signed_float\n | signed_float UINT signed_int\n | signed_float UINT power numeric_power\n \n factor_int : UINT\n | UINT signed_int\n | UINT power numeric_power\n | UINT UINT signed_int\n | UINT UINT power numeric_power\n \n factor_fits : UINT power OPEN_PAREN signed_int CLOSE_PAREN\n | UINT power signed_int\n | UINT SIGN UINT\n | UINT OPEN_PAREN signed_int CLOSE_PAREN\n \n product_of_units : unit_expression product product_of_units\n | unit_expression product_of_units\n | unit_expression\n \n unit_expression : function\n | unit_with_power\n | OPEN_PAREN product_of_units CLOSE_PAREN\n \n unit_with_power : UNIT power numeric_power\n | UNIT numeric_power\n | UNIT\n \n numeric_power : sign UINT\n | OPEN_PAREN paren_expr CLOSE_PAREN\n \n paren_expr : sign UINT\n | signed_float\n | frac\n \n frac : sign UINT division sign UINT\n \n sign : SIGN\n |\n \n product : STAR\n | PERIOD\n \n division : SOLIDUS\n \n power : DOUBLE_STAR\n | CARET\n \n signed_int : SIGN UINT\n \n signed_float : sign UINT\n | sign UFLOAT\n \n function_name : FUNCNAME\n \n function : function_name OPEN_PAREN main CLOSE_PAREN\n '
_lr_action_items = {'OPEN_PAREN':([0,3,6,7,8,9,10,11,12,13,14,16,17,18,19,21,23,26,27,28,29,34,36,38,39,41,42,43,46,47,53,54,55,58,59,62,63,64,66,67,72,73,75,76,77,78,80,],[13,13,13,-14,-15,-16,13,-32,-33,13,35,-17,-48,41,45,-54,13,-46,-47,13,13,57,-21,-49,-50,13,45,-36,-52,-53,-34,-23,45,-26,-22,-27,-18,45,-35,-38,-24,-51,-28,-19,-55,-39,-25,]),'UINT':([0,14,15,16,17,19,20,34,37,38,39,41,42,44,45,46,47,55,56,57,60,64,69,81,82,],[14,33,-44,40,-48,-45,46,-45,62,-49,-50,14,-45,67,-45,-52,-53,-45,73,-45,73,-45,79,-45,83,]),'SOLIDUS':([0,2,3,4,6,7,8,9,11,12,14,16,19,22,23,24,26,27,30,36,41,43,46,47,48,49,51,52,53,54,58,59,62,63,66,67,72,73,75,76,77,78,79,80,],[17,-12,17,17,-31,-14,-15,-16,-32,-33,-20,-17,-37,-12,17,17,-46,-47,-30,-21,17,-36,-52,-53,-12,17,-11,-29,-34,-23,-26,-22,-27,-18,-35,-38,-24,-51,-28,-19,-55,-39,17,-25,]),'UNIT':([0,3,6,7,8,9,10,11,12,13,14,16,17,19,23,26,27,28,29,36,41,43,46,47,53,54,58,59,62,63,66,67,72,73,75,76,77,78,80,],[19,19,19,-14,-15,-16,19,-32,-33,19,-20,-17,-48,-37,19,-46,-47,19,19,-21,19,-36,-52,-53,-34,-23,-26,-22,-27,-18,-35,-38,-24,-51,-28,-19,-55,-39,-25,]),'FUNCNAME':([0,3,6,7,8,9,10,11,12,13,14,16,17,19,23,26,27,28,29,36,41,43,46,47,53,54,58,59,62,63,66,67,72,73,75,76,77,78,80,],[21,21,21,-14,-15,-16,21,-32,-33,21,-20,-17,-48,-37,21,-46,-47,21,21,-21,21,-36,-52,-53,-34,-23,-26,-22,-27,-18,-35,-38,-24,-51,-28,-19,-55,-39,-25,]),'SIGN':([0,14,17,19,33,34,35,38,39,40,41,42,45,55,57,64,81,],[15,37,-48,15,56,60,56,-49,-50,56,15,15,15,15,60,15,15,]),'UFLOAT':([0,15,20,41,45,57,60,69,],[-45,-44,47,-45,-45,-45,-44,47,]),'$end':([1,2,3,4,5,6,7,8,9,11,12,14,16,19,22,24,25,30,31,36,43,46,47,48,49,50,51,52,53,54,58,59,62,63,66,67,72,73,75,76,77,78,80,],[0,-1,-10,-4,-7,-31,-14,-15,-16,-32,-33,-20,-17,-37,-2,-5,-8,-30,-13,-21,-36,-52,-53,-3,-6,-9,-11,-29,-34,-23,-26,-22,-27,-18,-35,-38,-24,-51,-28,-19,-55,-39,-25,]),'CLOSE_PAREN':([2,3,4,5,6,7,8,9,11,12,14,16,19,22,24,25,30,31,32,36,43,46,47,48,49,50,51,52,53,54,58,59,61,62,63,65,66,67,68,70,71,72,73,74,75,76,77,78,79,80,83,],[-1,-10,-4,-7,-31,-14,-15,-16,-32,-33,-20,-17,-37,-2,-5,-8,-30,-13,53,-21,-36,-52,-53,-3,-6,-9,-11,-29,-34,-23,-26,-22,75,-27,-18,77,-35,-38,78,-41,-42,-24,-51,80,-28,-19,-55,-39,-40,-25,-43,]),'STAR':([3,6,7,8,9,11,12,14,16,19,36,43,46,47,53,54,58,59,62,63,66,67,72,73,75,76,77,78,80,],[26,26,-14,-15,-16,-32,-33,-20,-17,-37,-21,-36,-52,-53,-34,-23,-26,-22,-27,-18,-35,-38,-24,-51,-28,-19,-55,-39,-25,]),'PERIOD':([3,6,7,8,9,11,12,14,16,19,36,43,46,47,53,54,58,59,62,63,66,67,72,73,75,76,77,78,80,],[27,27,-14,-15,-16,-32,-33,-20,-17,-37,-21,-36,-52,-53,-34,-23,-26,-22,-27,-18,-35,-38,-24,-51,-28,-19,-55,-39,-25,]),'DOUBLE_STAR':([14,19,33,40,],[38,38,38,38,]),'CARET':([14,19,33,40,],[39,39,39,39,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'main':([0,41,],[1,65,]),'product_of_units':([0,3,6,13,23,28,29,41,],[2,22,30,32,48,51,52,2,]),'factor':([0,41,],[3,3,]),'division_product_of_units':([0,3,23,41,],[4,24,49,4,]),'inverse_unit':([0,3,23,41,],[5,25,50,5,]),'unit_expression':([0,3,6,10,13,23,28,29,41,],[6,6,6,31,6,6,6,6,6,]),'factor_fits':([0,41,],[7,7,]),'factor_float':([0,41,],[8,8,]),'factor_int':([0,41,],[9,9,]),'division':([0,3,4,23,24,41,49,79,],[10,10,28,10,28,10,28,81,]),'function':([0,3,6,10,13,23,28,29,41,],[11,11,11,11,11,11,11,11,11,]),'unit_with_power':([0,3,6,10,13,23,28,29,41,],[12,12,12,12,12,12,12,12,12,]),'signed_float':([0,41,45,57,],[16,16,70,70,]),'function_name':([0,3,6,10,13,23,28,29,41,],[18,18,18,18,18,18,18,18,18,]),'sign':([0,19,34,41,42,45,55,57,64,81,],[20,44,44,20,44,69,44,69,44,82,]),'product':([3,6,],[23,29,]),'power':([14,19,33,40,],[34,42,55,64,]),'signed_int':([14,33,34,35,40,57,],[36,54,58,61,63,74,]),'numeric_power':([19,34,42,55,64,],[43,59,66,72,76,]),'paren_expr':([45,57,],[68,68,]),'frac':([45,57,],[71,71,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> main","S'",1,None,None,None),
('main -> product_of_units','main',1,'p_main','generic.py',193),
('main -> factor product_of_units','main',2,'p_main','generic.py',194),
('main -> factor product product_of_units','main',3,'p_main','generic.py',195),
('main -> division_product_of_units','main',1,'p_main','generic.py',196),
('main -> factor division_product_of_units','main',2,'p_main','generic.py',197),
('main -> factor product division_product_of_units','main',3,'p_main','generic.py',198),
('main -> inverse_unit','main',1,'p_main','generic.py',199),
('main -> factor inverse_unit','main',2,'p_main','generic.py',200),
('main -> factor product inverse_unit','main',3,'p_main','generic.py',201),
('main -> factor','main',1,'p_main','generic.py',202),
('division_product_of_units -> division_product_of_units division product_of_units','division_product_of_units',3,'p_division_product_of_units','generic.py',214),
('division_product_of_units -> product_of_units','division_product_of_units',1,'p_division_product_of_units','generic.py',215),
('inverse_unit -> division unit_expression','inverse_unit',2,'p_inverse_unit','generic.py',225),
('factor -> factor_fits','factor',1,'p_factor','generic.py',231),
('factor -> factor_float','factor',1,'p_factor','generic.py',232),
('factor -> factor_int','factor',1,'p_factor','generic.py',233),
('factor_float -> signed_float','factor_float',1,'p_factor_float','generic.py',239),
('factor_float -> signed_float UINT signed_int','factor_float',3,'p_factor_float','generic.py',240),
('factor_float -> signed_float UINT power numeric_power','factor_float',4,'p_factor_float','generic.py',241),
('factor_int -> UINT','factor_int',1,'p_factor_int','generic.py',254),
('factor_int -> UINT signed_int','factor_int',2,'p_factor_int','generic.py',255),
('factor_int -> UINT power numeric_power','factor_int',3,'p_factor_int','generic.py',256),
('factor_int -> UINT UINT signed_int','factor_int',3,'p_factor_int','generic.py',257),
('factor_int -> UINT UINT power numeric_power','factor_int',4,'p_factor_int','generic.py',258),
('factor_fits -> UINT power OPEN_PAREN signed_int CLOSE_PAREN','factor_fits',5,'p_factor_fits','generic.py',276),
('factor_fits -> UINT power signed_int','factor_fits',3,'p_factor_fits','generic.py',277),
('factor_fits -> UINT SIGN UINT','factor_fits',3,'p_factor_fits','generic.py',278),
('factor_fits -> UINT OPEN_PAREN signed_int CLOSE_PAREN','factor_fits',4,'p_factor_fits','generic.py',279),
('product_of_units -> unit_expression product product_of_units','product_of_units',3,'p_product_of_units','generic.py',298),
('product_of_units -> unit_expression product_of_units','product_of_units',2,'p_product_of_units','generic.py',299),
('product_of_units -> unit_expression','product_of_units',1,'p_product_of_units','generic.py',300),
('unit_expression -> function','unit_expression',1,'p_unit_expression','generic.py',311),
('unit_expression -> unit_with_power','unit_expression',1,'p_unit_expression','generic.py',312),
('unit_expression -> OPEN_PAREN product_of_units CLOSE_PAREN','unit_expression',3,'p_unit_expression','generic.py',313),
('unit_with_power -> UNIT power numeric_power','unit_with_power',3,'p_unit_with_power','generic.py',322),
('unit_with_power -> UNIT numeric_power','unit_with_power',2,'p_unit_with_power','generic.py',323),
('unit_with_power -> UNIT','unit_with_power',1,'p_unit_with_power','generic.py',324),
('numeric_power -> sign UINT','numeric_power',2,'p_numeric_power','generic.py',335),
('numeric_power -> OPEN_PAREN paren_expr CLOSE_PAREN','numeric_power',3,'p_numeric_power','generic.py',336),
('paren_expr -> sign UINT','paren_expr',2,'p_paren_expr','generic.py',345),
('paren_expr -> signed_float','paren_expr',1,'p_paren_expr','generic.py',346),
('paren_expr -> frac','paren_expr',1,'p_paren_expr','generic.py',347),
('frac -> sign UINT division sign UINT','frac',5,'p_frac','generic.py',356),
('sign -> SIGN','sign',1,'p_sign','generic.py',362),
('sign -> <empty>','sign',0,'p_sign','generic.py',363),
('product -> STAR','product',1,'p_product','generic.py',372),
('product -> PERIOD','product',1,'p_product','generic.py',373),
('division -> SOLIDUS','division',1,'p_division','generic.py',379),
('power -> DOUBLE_STAR','power',1,'p_power','generic.py',385),
('power -> CARET','power',1,'p_power','generic.py',386),
('signed_int -> SIGN UINT','signed_int',2,'p_signed_int','generic.py',392),
('signed_float -> sign UINT','signed_float',2,'p_signed_float','generic.py',398),
('signed_float -> sign UFLOAT','signed_float',2,'p_signed_float','generic.py',399),
('function_name -> FUNCNAME','function_name',1,'p_function_name','generic.py',405),
('function -> function_name OPEN_PAREN main CLOSE_PAREN','function',4,'p_function','generic.py',411),
]
|
609242b94056eecd11704b26793dcc73037bcc9d6e440e83f2ebd8a7455059c2 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to this file.
# cds_lextab.py. This file automatically created by PLY (version 3.10). Don't edit!
_tabversion = '3.10'
_lextokens = set(('CLOSE_PAREN', 'DIVISION', 'OPEN_PAREN', 'PRODUCT', 'SIGN', 'UFLOAT', 'UINT', 'UNIT', 'X'))
_lexreflags = 32
_lexliterals = ''
_lexstateinfo = {'INITIAL': 'inclusive'}
_lexstatere = {'INITIAL': [('(?P<t_UFLOAT>((\\d+\\.?\\d+)|(\\.\\d+))([eE][+-]?\\d+)?)|(?P<t_UINT>\\d+)|(?P<t_SIGN>[+-](?=\\d))|(?P<t_X>[x×])|(?P<t_UNIT>\\%|°|\\\\h|((?!\\d)\\w)+)|(?P<t_CLOSE_PAREN>\\))|(?P<t_OPEN_PAREN>\\()|(?P<t_PRODUCT>\\.)|(?P<t_DIVISION>/)', [None, ('t_UFLOAT', 'UFLOAT'), None, None, None, None, ('t_UINT', 'UINT'), ('t_SIGN', 'SIGN'), ('t_X', 'X'), ('t_UNIT', 'UNIT'), None, (None, 'CLOSE_PAREN'), (None, 'OPEN_PAREN'), (None, 'PRODUCT'), (None, 'DIVISION')])]}
_lexstateignore = {'INITIAL': ''}
_lexstateerrorf = {'INITIAL': 't_error'}
_lexstateeoff = {}
|
1567ac4f8eaab6f91f37f3db25a2c10421858cc2c0eebadf75a8b76803bbe171 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to this file.
# ogip_parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'DIVISION OPEN_PAREN CLOSE_PAREN WHITESPACE STARSTAR STAR SIGN UFLOAT LIT10 UINT UNKNOWN UNIT\n main : UNKNOWN\n | complete_expression\n | scale_factor complete_expression\n | scale_factor WHITESPACE complete_expression\n \n complete_expression : product_of_units\n \n product_of_units : unit_expression\n | division unit_expression\n | product_of_units product unit_expression\n | product_of_units division unit_expression\n \n unit_expression : unit\n | UNIT OPEN_PAREN complete_expression CLOSE_PAREN\n | OPEN_PAREN complete_expression CLOSE_PAREN\n | UNIT OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power\n | OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power\n \n scale_factor : LIT10 power numeric_power\n | LIT10\n | signed_float\n | signed_float power numeric_power\n | signed_int power numeric_power\n \n division : DIVISION\n | WHITESPACE DIVISION\n | WHITESPACE DIVISION WHITESPACE\n | DIVISION WHITESPACE\n \n product : WHITESPACE\n | STAR\n | WHITESPACE STAR\n | WHITESPACE STAR WHITESPACE\n | STAR WHITESPACE\n \n power : STARSTAR\n \n unit : UNIT\n | UNIT power numeric_power\n \n numeric_power : UINT\n | signed_float\n | OPEN_PAREN signed_int CLOSE_PAREN\n | OPEN_PAREN signed_float CLOSE_PAREN\n | OPEN_PAREN signed_float division UINT CLOSE_PAREN\n \n sign : SIGN\n |\n \n signed_int : SIGN UINT\n \n signed_float : sign UINT\n | sign UFLOAT\n '
_lr_action_items = {'UNKNOWN':([0,],[2,]),'LIT10':([0,],[7,]),'SIGN':([0,25,26,27,28,34,47,59,63,],[13,48,-29,48,48,48,13,48,48,]),'UNIT':([0,4,7,8,11,16,17,19,20,21,22,23,24,30,31,33,36,38,39,42,43,44,45,46,49,50,54,55,60,61,67,],[15,15,-16,-17,15,15,-20,15,-21,15,15,-24,-25,-40,-41,15,-23,-20,-22,-26,-28,-15,-32,-33,-18,-19,-22,-27,-34,-35,-36,]),'OPEN_PAREN':([0,4,7,8,11,15,16,17,19,20,21,22,23,24,25,26,27,28,30,31,33,34,36,38,39,42,43,44,45,46,49,50,54,55,59,60,61,63,67,],[16,16,-16,-17,16,33,16,-20,16,-21,16,16,-24,-25,47,-29,47,47,-40,-41,16,47,-23,-20,-22,-26,-28,-15,-32,-33,-18,-19,-22,-27,47,-34,-35,47,-36,]),'DIVISION':([0,4,5,6,7,8,10,14,15,16,19,23,29,30,31,33,40,41,44,45,46,49,50,52,53,57,58,60,61,64,66,67,],[17,17,20,17,-16,-17,-6,-10,-30,17,38,20,-7,-40,-41,17,-8,-9,-15,-32,-33,-18,-19,-31,-12,17,-11,-34,-35,-14,-13,-36,]),'WHITESPACE':([0,4,6,7,8,10,14,15,16,17,19,20,24,29,30,31,33,38,40,41,42,44,45,46,49,50,52,53,57,58,60,61,64,66,67,],[5,19,23,-16,-17,-6,-10,-30,5,36,5,39,43,-7,-40,-41,5,54,-8,-9,55,-15,-32,-33,-18,-19,-31,-12,5,-11,-34,-35,-14,-13,-36,]),'UINT':([0,12,13,17,20,25,26,27,28,34,36,39,47,48,59,62,63,],[-38,30,32,-20,-21,45,-29,45,45,45,-23,-22,-38,-37,45,65,45,]),'UFLOAT':([0,12,13,25,26,27,28,34,47,48,59,63,],[-38,31,-37,-38,-29,-38,-38,-38,-38,-37,-38,-38,]),'$end':([1,2,3,6,10,14,15,18,29,30,31,37,40,41,45,46,52,53,58,60,61,64,66,67,],[0,-1,-2,-5,-6,-10,-30,-3,-7,-40,-41,-4,-8,-9,-32,-33,-31,-12,-11,-34,-35,-14,-13,-36,]),'CLOSE_PAREN':([6,10,14,15,29,30,31,32,35,40,41,45,46,51,52,53,56,57,58,60,61,64,65,66,67,],[-5,-6,-10,-30,-7,-40,-41,-39,53,-8,-9,-32,-33,58,-31,-12,60,61,-11,-34,-35,-14,67,-13,-36,]),'STAR':([6,10,14,15,23,29,30,31,40,41,45,46,52,53,58,60,61,64,66,67,],[24,-6,-10,-30,42,-7,-40,-41,-8,-9,-32,-33,-31,-12,-11,-34,-35,-14,-13,-36,]),'STARSTAR':([7,8,9,15,30,31,32,53,58,],[26,26,26,26,-40,-41,-39,26,26,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'main':([0,],[1,]),'complete_expression':([0,4,16,19,33,],[3,18,35,37,51,]),'scale_factor':([0,],[4,]),'product_of_units':([0,4,16,19,33,],[6,6,6,6,6,]),'signed_float':([0,25,27,28,34,47,59,63,],[8,46,46,46,46,57,46,46,]),'signed_int':([0,47,],[9,56,]),'unit_expression':([0,4,11,16,19,21,22,33,],[10,10,29,10,10,40,41,10,]),'division':([0,4,6,16,19,33,57,],[11,11,22,11,11,11,62,]),'sign':([0,25,27,28,34,47,59,63,],[12,12,12,12,12,12,12,12,]),'unit':([0,4,11,16,19,21,22,33,],[14,14,14,14,14,14,14,14,]),'product':([6,],[21,]),'power':([7,8,9,15,53,58,],[25,27,28,34,59,63,]),'numeric_power':([25,27,28,34,59,63,],[44,49,50,52,64,66,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> main","S'",1,None,None,None),
('main -> UNKNOWN','main',1,'p_main','ogip.py',195),
('main -> complete_expression','main',1,'p_main','ogip.py',196),
('main -> scale_factor complete_expression','main',2,'p_main','ogip.py',197),
('main -> scale_factor WHITESPACE complete_expression','main',3,'p_main','ogip.py',198),
('complete_expression -> product_of_units','complete_expression',1,'p_complete_expression','ogip.py',209),
('product_of_units -> unit_expression','product_of_units',1,'p_product_of_units','ogip.py',215),
('product_of_units -> division unit_expression','product_of_units',2,'p_product_of_units','ogip.py',216),
('product_of_units -> product_of_units product unit_expression','product_of_units',3,'p_product_of_units','ogip.py',217),
('product_of_units -> product_of_units division unit_expression','product_of_units',3,'p_product_of_units','ogip.py',218),
('unit_expression -> unit','unit_expression',1,'p_unit_expression','ogip.py',232),
('unit_expression -> UNIT OPEN_PAREN complete_expression CLOSE_PAREN','unit_expression',4,'p_unit_expression','ogip.py',233),
('unit_expression -> OPEN_PAREN complete_expression CLOSE_PAREN','unit_expression',3,'p_unit_expression','ogip.py',234),
('unit_expression -> UNIT OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power','unit_expression',6,'p_unit_expression','ogip.py',235),
('unit_expression -> OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power','unit_expression',5,'p_unit_expression','ogip.py',236),
('scale_factor -> LIT10 power numeric_power','scale_factor',3,'p_scale_factor','ogip.py',270),
('scale_factor -> LIT10','scale_factor',1,'p_scale_factor','ogip.py',271),
('scale_factor -> signed_float','scale_factor',1,'p_scale_factor','ogip.py',272),
('scale_factor -> signed_float power numeric_power','scale_factor',3,'p_scale_factor','ogip.py',273),
('scale_factor -> signed_int power numeric_power','scale_factor',3,'p_scale_factor','ogip.py',274),
('division -> DIVISION','division',1,'p_division','ogip.py',289),
('division -> WHITESPACE DIVISION','division',2,'p_division','ogip.py',290),
('division -> WHITESPACE DIVISION WHITESPACE','division',3,'p_division','ogip.py',291),
('division -> DIVISION WHITESPACE','division',2,'p_division','ogip.py',292),
('product -> WHITESPACE','product',1,'p_product','ogip.py',298),
('product -> STAR','product',1,'p_product','ogip.py',299),
('product -> WHITESPACE STAR','product',2,'p_product','ogip.py',300),
('product -> WHITESPACE STAR WHITESPACE','product',3,'p_product','ogip.py',301),
('product -> STAR WHITESPACE','product',2,'p_product','ogip.py',302),
('power -> STARSTAR','power',1,'p_power','ogip.py',308),
('unit -> UNIT','unit',1,'p_unit','ogip.py',314),
('unit -> UNIT power numeric_power','unit',3,'p_unit','ogip.py',315),
('numeric_power -> UINT','numeric_power',1,'p_numeric_power','ogip.py',324),
('numeric_power -> signed_float','numeric_power',1,'p_numeric_power','ogip.py',325),
('numeric_power -> OPEN_PAREN signed_int CLOSE_PAREN','numeric_power',3,'p_numeric_power','ogip.py',326),
('numeric_power -> OPEN_PAREN signed_float CLOSE_PAREN','numeric_power',3,'p_numeric_power','ogip.py',327),
('numeric_power -> OPEN_PAREN signed_float division UINT CLOSE_PAREN','numeric_power',5,'p_numeric_power','ogip.py',328),
('sign -> SIGN','sign',1,'p_sign','ogip.py',339),
('sign -> <empty>','sign',0,'p_sign','ogip.py',340),
('signed_int -> SIGN UINT','signed_int',2,'p_signed_int','ogip.py',349),
('signed_float -> sign UINT','signed_float',2,'p_signed_float','ogip.py',355),
('signed_float -> sign UFLOAT','signed_float',2,'p_signed_float','ogip.py',356),
]
|
3f13b7a6b6147a1c20269ca15085ae69621f16e57c490a738b247f04ebb5e8fb | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to this file.
# generic_lextab.py. This file automatically created by PLY (version 3.10). Don't edit!
_tabversion = '3.10'
_lextokens = set(('CARET', 'CLOSE_PAREN', 'DOUBLE_STAR', 'FUNCNAME', 'OPEN_PAREN', 'PERIOD', 'SIGN', 'SOLIDUS', 'STAR', 'UFLOAT', 'UINT', 'UNIT'))
_lexreflags = 32
_lexliterals = ''
_lexstateinfo = {'INITIAL': 'inclusive'}
_lexstatere = {'INITIAL': [("(?P<t_UFLOAT>((\\d+\\.?\\d*)|(\\.\\d+))([eE][+-]?\\d+)?)|(?P<t_UINT>\\d+)|(?P<t_SIGN>[+-](?=\\d))|(?P<t_FUNCNAME>((sqrt)|(ln)|(exp)|(log)|(mag)|(dB)|(dex))(?=\\ *\\())|(?P<t_UNIT>%|([YZEPTGMkhdcmunpfazy]?'((?!\\d)\\w)+')|((?!\\d)\\w)+)|(?P<t_DOUBLE_STAR>\\*\\*)|(?P<t_CLOSE_PAREN>\\))|(?P<t_OPEN_PAREN>\\()|(?P<t_CARET>\\^)|(?P<t_PERIOD>\\.)|(?P<t_STAR>\\*)|(?P<t_SOLIDUS>/)", [None, ('t_UFLOAT', 'UFLOAT'), None, None, None, None, ('t_UINT', 'UINT'), ('t_SIGN', 'SIGN'), ('t_FUNCNAME', 'FUNCNAME'), None, None, None, None, None, None, None, None, ('t_UNIT', 'UNIT'), None, None, None, (None, 'DOUBLE_STAR'), (None, 'CLOSE_PAREN'), (None, 'OPEN_PAREN'), (None, 'CARET'), (None, 'PERIOD'), (None, 'STAR'), (None, 'SOLIDUS')])]}
_lexstateignore = {'INITIAL': ' '}
_lexstateerrorf = {'INITIAL': 't_error'}
_lexstateeoff = {}
|
fa5e63838da35fd8d35696cdfa493e52f92db53f031020eabeb76ba5c6c10206 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from ...utils.misc import InheritDocstrings
class _FormatterMeta(InheritDocstrings):
registry = {}
def __new__(mcls, name, bases, members):
if 'name' in members:
formatter_name = members['name'].lower()
else:
formatter_name = members['name'] = name.lower()
cls = super().__new__(mcls, name, bases, members)
mcls.registry[formatter_name] = cls
return cls
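# Illustration (hedged: ``registry`` is an internal detail, not public
# API): once a format class such as CDS has been created by this
# metaclass, it can be looked up by its lowercased name, e.g.
# ``Base.registry['cds']``.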
TAB_HEADER = """# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to this file.
"""
class Base(metaclass=_FormatterMeta):
"""
The abstract base class of all unit formats.
"""
def __new__(cls, *args, **kwargs):
# This __new__ is to make it clear that there is no reason to
# instantiate a Formatter--if you try to you'll just get back the
# class
return cls
@classmethod
def parse(cls, s):
"""
Convert a string to a unit object.
"""
raise NotImplementedError(
"Can not parse {0}".format(cls.__name__))
@classmethod
def to_string(cls, u):
"""
Convert a unit object to a string.
"""
raise NotImplementedError(
"Can not output in {0} format".format(cls.__name__))
@classmethod
def _add_tab_header(cls, name):
lextab_file = os.path.join(os.path.dirname(__file__), name + '.py')
with open(lextab_file, 'r') as f:
contents = f.read()
with open(lextab_file, 'w') as f:
f.write(TAB_HEADER)
f.write(contents)
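# A minimal sketch of a custom format built on ``Base`` (hedged: the
# class below is hypothetical and only illustrates the expected shape of
# a subclass; real formats also override ``to_string``):
#
#     class Simple(Base):
#         name = 'simple'
#
#         @classmethod
#         def parse(cls, s):
#             from .. import core
#             return core.Unit(s)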
|
31179e55b55349867254460481a7d71913356c9adedfaa9035f81a0951b3419a | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles units in `Office of Guest Investigator Programs (OGIP)
FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__.
"""
import keyword
import math
import os
import copy
import warnings
from fractions import Fraction
from . import core, generic, utils
class OGIP(generic.Generic):
"""
Support the units in `Office of Guest Investigator Programs (OGIP)
FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__.
"""
_tokens = (
'DIVISION',
'OPEN_PAREN',
'CLOSE_PAREN',
'WHITESPACE',
'STARSTAR',
'STAR',
'SIGN',
'UFLOAT',
'LIT10',
'UINT',
'UNKNOWN',
'UNIT'
)
@staticmethod
def _generate_unit_names():
from ... import units as u
names = {}
deprecated_names = set()
bases = [
'A', 'C', 'cd', 'eV', 'F', 'g', 'H', 'Hz', 'J',
'Jy', 'K', 'lm', 'lx', 'm', 'mol', 'N', 'ohm', 'Pa',
'pc', 'rad', 's', 'S', 'sr', 'T', 'V', 'W', 'Wb'
]
deprecated_bases = []
prefixes = [
'y', 'z', 'a', 'f', 'p', 'n', 'u', 'm', 'c', 'd',
'', 'da', 'h', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'
]
for base in bases + deprecated_bases:
for prefix in prefixes:
key = prefix + base
if keyword.iskeyword(key):
continue
names[key] = getattr(u, key)
for base in deprecated_bases:
for prefix in prefixes:
deprecated_names.add(prefix + base)
simple_units = [
'angstrom', 'arcmin', 'arcsec', 'AU', 'barn', 'bin',
'byte', 'chan', 'count', 'day', 'deg', 'erg', 'G',
'h', 'lyr', 'mag', 'min', 'photon', 'pixel',
'voxel', 'yr'
]
for unit in simple_units:
names[unit] = getattr(u, unit)
# Create a separate, disconnected unit for the special case of
# Crab and mCrab, since OGIP doesn't define their quantities.
Crab = u.def_unit(['Crab'], prefixes=False, doc='Crab (X-ray flux)')
mCrab = u.Unit(10 ** -3 * Crab)
names['Crab'] = Crab
names['mCrab'] = mCrab
deprecated_units = ['Crab', 'mCrab']
for unit in deprecated_units:
deprecated_names.add(unit)
# Define the function names, so we can parse them, even though
# we can't use any of them (other than sqrt) meaningfully for
# now.
functions = [
'log', 'ln', 'exp', 'sqrt', 'sin', 'cos', 'tan', 'asin',
'acos', 'atan', 'sinh', 'cosh', 'tanh'
]
for name in functions:
names[name] = name
return names, deprecated_names, functions
@classmethod
def _make_lexer(cls):
from ...extern.ply import lex
tokens = cls._tokens
t_DIVISION = r'/'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_WHITESPACE = '[ \t]+'
t_STARSTAR = r'\*\*'
t_STAR = r'\*'
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'(((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+))|(((\d+\.\d*)|(\.\d+))([eE][+-]?\d+)?)'
t.value = float(t.value)
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+-](?=\d)'
t.value = float(t.value + '1')
return t
def t_X(t): # multiplication for factor in front of unit
r'[x×]'
return t
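        # Note: t_UINT above already matches '10' and PLY applies function
        # rules in definition order, so this rule is effectively shadowed;
        # a bare '10' reaches the parser as UINT and is handled via the
        # signed_float production.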
def t_LIT10(t):
r'10'
return 10
def t_UNKNOWN(t):
r'[Uu][Nn][Kk][Nn][Oo][Ww][Nn]'
return None
def t_UNIT(t):
r'[a-zA-Z][a-zA-Z_]*'
t.value = cls._get_unit(t)
return t
# Don't ignore whitespace
t_ignore = ''
# Error handling rule
def t_error(t):
raise ValueError(
"Invalid character at col {0}".format(t.lexpos))
lexer_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'ogip_lextab.py'))
lexer = lex.lex(optimize=True, lextab='ogip_lextab',
outputdir=os.path.dirname(__file__))
if not lexer_exists:
cls._add_tab_header('ogip_lextab')
return lexer
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the
`Specification of Physical Units within OGIP FITS files
<https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/>`__,
        which is not terribly precise. The exact grammar here is
based on the YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
"""
from ...extern.ply import yacc
tokens = cls._tokens
def p_main(p):
'''
main : UNKNOWN
| complete_expression
| scale_factor complete_expression
| scale_factor WHITESPACE complete_expression
'''
if len(p) == 4:
p[0] = p[1] * p[3]
elif len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1]
def p_complete_expression(p):
'''
complete_expression : product_of_units
'''
p[0] = p[1]
def p_product_of_units(p):
'''
product_of_units : unit_expression
| division unit_expression
| product_of_units product unit_expression
| product_of_units division unit_expression
'''
if len(p) == 4:
if p[2] == 'DIVISION':
p[0] = p[1] / p[3]
else:
p[0] = p[1] * p[3]
elif len(p) == 3:
p[0] = p[2] ** -1
else:
p[0] = p[1]
def p_unit_expression(p):
'''
unit_expression : unit
| UNIT OPEN_PAREN complete_expression CLOSE_PAREN
| OPEN_PAREN complete_expression CLOSE_PAREN
| UNIT OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power
| OPEN_PAREN complete_expression CLOSE_PAREN power numeric_power
'''
# If we run p[1] in cls._functions, it will try and parse each
# item in the list into a unit, which is slow. Since we know that
# all the items in the list are strings, we can simply convert
# p[1] to a string instead.
p1_str = str(p[1])
if p1_str in cls._functions and p1_str != 'sqrt':
raise ValueError(
"The function '{0}' is valid in OGIP, but not understood "
"by astropy.units.".format(
p[1]))
if len(p) == 7:
if p1_str == 'sqrt':
p[0] = p[1] * p[3] ** (0.5 * p[6])
else:
p[0] = p[1] * p[3] ** p[6]
elif len(p) == 6:
p[0] = p[2] ** p[5]
elif len(p) == 5:
if p1_str == 'sqrt':
p[0] = p[3] ** 0.5
else:
p[0] = p[1] * p[3]
elif len(p) == 4:
p[0] = p[2]
else:
p[0] = p[1]
def p_scale_factor(p):
'''
scale_factor : LIT10 power numeric_power
| LIT10
| signed_float
| signed_float power numeric_power
| signed_int power numeric_power
'''
if len(p) == 4:
p[0] = 10 ** p[3]
else:
p[0] = p[1]
            # Can't use np.log10 here, because p[0] may be an
            # arbitrary-precision Python int.
if math.log10(p[0]) % 1.0 != 0.0:
from ..core import UnitsWarning
warnings.warn(
"'{0}' scale should be a power of 10 in "
"OGIP format".format(p[0]), UnitsWarning)
def p_division(p):
'''
division : DIVISION
| WHITESPACE DIVISION
| WHITESPACE DIVISION WHITESPACE
| DIVISION WHITESPACE
'''
p[0] = 'DIVISION'
def p_product(p):
'''
product : WHITESPACE
| STAR
| WHITESPACE STAR
| WHITESPACE STAR WHITESPACE
| STAR WHITESPACE
'''
p[0] = 'PRODUCT'
def p_power(p):
'''
power : STARSTAR
'''
p[0] = 'POWER'
def p_unit(p):
'''
unit : UNIT
| UNIT power numeric_power
'''
if len(p) == 4:
p[0] = p[1] ** p[3]
else:
p[0] = p[1]
def p_numeric_power(p):
'''
numeric_power : UINT
| signed_float
| OPEN_PAREN signed_int CLOSE_PAREN
| OPEN_PAREN signed_float CLOSE_PAREN
| OPEN_PAREN signed_float division UINT CLOSE_PAREN
'''
if len(p) == 6:
p[0] = Fraction(int(p[2]), int(p[4]))
elif len(p) == 4:
p[0] = p[2]
else:
p[0] = p[1]
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_signed_int(p):
'''
signed_int : SIGN UINT
'''
p[0] = p[1] * p[2]
def p_signed_float(p):
'''
signed_float : sign UINT
| sign UFLOAT
'''
p[0] = p[1] * p[2]
def p_error(p):
raise ValueError()
parser_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'ogip_parsetab.py'))
parser = yacc.yacc(debug=False, tabmodule='ogip_parsetab',
outputdir=os.path.dirname(__file__),
write_tables=True)
if not parser_exists:
cls._add_tab_header('ogip_parsetab')
return parser
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
raise ValueError(
"At col {0}, '{1}': {2}".format(
t.lexpos, t.value, str(e)))
@classmethod
def _validate_unit(cls, unit, detailed_exception=True):
if unit not in cls._units:
if detailed_exception:
raise ValueError(
"Unit '{0}' not supported by the OGIP "
"standard. {1}".format(
unit, utils.did_you_mean_units(
unit, cls._units, cls._deprecated_units,
cls._to_decomposed_alternative)))
else:
raise ValueError()
if unit in cls._deprecated_units:
utils.unit_deprecation_warning(
unit, cls._units[unit], 'OGIP',
cls._to_decomposed_alternative)
@classmethod
def _parse_unit(cls, unit, detailed_exception=True):
cls._validate_unit(unit, detailed_exception=detailed_exception)
return cls._units[unit]
@classmethod
def parse(cls, s, debug=False):
s = s.strip()
try:
# This is a short circuit for the case where the string is
# just a single unit name
return cls._parse_unit(s, detailed_exception=False)
except ValueError:
try:
return core.Unit(
cls._parser.parse(s, lexer=cls._lexer, debug=debug))
except ValueError as e:
if str(e):
raise
else:
raise ValueError(
"Syntax error parsing unit '{0}'".format(s))
@classmethod
def _get_unit_name(cls, unit):
name = unit.get_format_name('ogip')
cls._validate_unit(name)
return name
@classmethod
def _format_unit_list(cls, units):
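        # Illustrative: [(u.erg, 1), (u.s, -1)] -> 'erg s**-1'; entries
        # are sorted by unit name and fractional powers are parenthesised,
        # e.g. 'cm**(3/2)'.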
out = []
units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
power = utils.format_power(power)
if '/' in power:
out.append('{0}**({1})'.format(
cls._get_unit_name(base), power))
else:
out.append('{0}**{1}'.format(
cls._get_unit_name(base), power))
return ' '.join(out)
@classmethod
def to_string(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
            # Can't use np.log10 here, because unit.scale may be an
            # arbitrary-precision Python int.
if math.log10(unit.scale) % 1.0 != 0.0:
warnings.warn(
"'{0}' scale should be a power of 10 in "
"OGIP format".format(
unit.scale),
core.UnitsWarning)
return generic._to_string(cls, unit)
@classmethod
def _to_decomposed_alternative(cls, unit):
# Remove units that aren't known to the format
unit = utils.decompose_to_known_units(unit, cls._get_unit_name)
if isinstance(unit, core.CompositeUnit):
            # Can't use np.log10 here, because unit.scale may be an
            # arbitrary-precision Python int.
if math.log10(unit.scale) % 1.0 != 0.0:
scale = unit.scale
unit = copy.copy(unit)
unit._scale = 1.0
return '{0} (with data multiplied by {1})'.format(
generic._to_string(cls, unit), scale)
        return generic._to_string(cls, unit)
|
cd2741eb27c87818a9fa8202b8af6df1e77d676dd1d656d73b273de9205ef414 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to this file.
# cds_parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'PRODUCT DIVISION OPEN_PAREN CLOSE_PAREN X SIGN UINT UFLOAT UNIT\n main : factor combined_units\n | combined_units\n | factor\n \n combined_units : product_of_units\n | division_of_units\n \n product_of_units : unit_expression PRODUCT combined_units\n | unit_expression\n \n division_of_units : DIVISION unit_expression\n | unit_expression DIVISION combined_units\n \n unit_expression : unit_with_power\n | OPEN_PAREN combined_units CLOSE_PAREN\n \n factor : signed_float X UINT signed_int\n | UINT X UINT signed_int\n | UINT signed_int\n | UINT\n | signed_float\n \n unit_with_power : UNIT numeric_power\n | UNIT\n \n numeric_power : sign UINT\n \n sign : SIGN\n |\n \n signed_int : SIGN UINT\n \n signed_float : sign UINT\n | sign UFLOAT\n '
_lr_action_items = {'UINT':([0,8,11,14,16,17,19,27,],[5,20,-20,-21,28,29,30,34,]),'DIVISION':([0,2,4,5,9,12,13,14,18,20,21,22,23,26,30,33,34,35,36,],[10,10,-16,-15,23,-10,10,-18,-14,-23,-24,10,10,-17,-22,-11,-19,-12,-13,]),'SIGN':([0,5,14,28,29,],[11,19,11,19,19,]),'UFLOAT':([0,8,11,],[-21,21,-20,]),'OPEN_PAREN':([0,2,4,5,10,13,18,20,21,22,23,30,35,36,],[13,13,-16,-15,13,13,-14,-23,-24,13,13,-22,-12,-13,]),'UNIT':([0,2,4,5,10,13,18,20,21,22,23,30,35,36,],[14,14,-16,-15,14,14,-14,-23,-24,14,14,-22,-12,-13,]),'$end':([1,2,3,4,5,6,7,9,12,14,15,18,20,21,24,26,30,31,32,33,34,35,36,],[0,-3,-2,-16,-15,-4,-5,-7,-10,-18,-1,-14,-23,-24,-8,-17,-22,-6,-9,-11,-19,-12,-13,]),'X':([4,5,20,21,],[16,17,-23,-24,]),'CLOSE_PAREN':([6,7,9,12,14,24,25,26,31,32,33,34,],[-4,-5,-7,-10,-18,-8,33,-17,-6,-9,-11,-19,]),'PRODUCT':([9,12,14,26,33,34,],[22,-10,-18,-17,-11,-19,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'main':([0,],[1,]),'factor':([0,],[2,]),'combined_units':([0,2,13,22,23,],[3,15,25,31,32,]),'signed_float':([0,],[4,]),'product_of_units':([0,2,13,22,23,],[6,6,6,6,6,]),'division_of_units':([0,2,13,22,23,],[7,7,7,7,7,]),'sign':([0,14,],[8,27,]),'unit_expression':([0,2,10,13,22,23,],[9,9,24,9,9,9,]),'unit_with_power':([0,2,10,13,22,23,],[12,12,12,12,12,12,]),'signed_int':([5,28,29,],[18,35,36,]),'numeric_power':([14,],[26,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> main","S'",1,None,None,None),
('main -> factor combined_units','main',2,'p_main','cds.py',158),
('main -> combined_units','main',1,'p_main','cds.py',159),
('main -> factor','main',1,'p_main','cds.py',160),
('combined_units -> product_of_units','combined_units',1,'p_combined_units','cds.py',170),
('combined_units -> division_of_units','combined_units',1,'p_combined_units','cds.py',171),
('product_of_units -> unit_expression PRODUCT combined_units','product_of_units',3,'p_product_of_units','cds.py',177),
('product_of_units -> unit_expression','product_of_units',1,'p_product_of_units','cds.py',178),
('division_of_units -> DIVISION unit_expression','division_of_units',2,'p_division_of_units','cds.py',187),
('division_of_units -> unit_expression DIVISION combined_units','division_of_units',3,'p_division_of_units','cds.py',188),
('unit_expression -> unit_with_power','unit_expression',1,'p_unit_expression','cds.py',197),
('unit_expression -> OPEN_PAREN combined_units CLOSE_PAREN','unit_expression',3,'p_unit_expression','cds.py',198),
('factor -> signed_float X UINT signed_int','factor',4,'p_factor','cds.py',207),
('factor -> UINT X UINT signed_int','factor',4,'p_factor','cds.py',208),
('factor -> UINT signed_int','factor',2,'p_factor','cds.py',209),
('factor -> UINT','factor',1,'p_factor','cds.py',210),
('factor -> signed_float','factor',1,'p_factor','cds.py',211),
('unit_with_power -> UNIT numeric_power','unit_with_power',2,'p_unit_with_power','cds.py',228),
('unit_with_power -> UNIT','unit_with_power',1,'p_unit_with_power','cds.py',229),
('numeric_power -> sign UINT','numeric_power',2,'p_numeric_power','cds.py',238),
('sign -> SIGN','sign',1,'p_sign','cds.py',244),
('sign -> <empty>','sign',0,'p_sign','cds.py',245),
('signed_int -> SIGN UINT','signed_int',2,'p_signed_int','cds.py',254),
('signed_float -> sign UINT','signed_float',2,'p_signed_float','cds.py',260),
('signed_float -> sign UFLOAT','signed_float',2,'p_signed_float','cds.py',261),
]
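# Illustrative sketch only (the tables above are generated; do not edit):
# PLY's LR driver reads them as _lr_action[state][token] and
# _lr_goto[state][nonterminal]; a positive action shifts to that state,
# while a negative action reduces by the production of that index in
# _lr_productions.
if __name__ == '__main__':
    print(_lr_action[0]['UNIT'])    # 14 -> shift to state 14
    print(_lr_productions[18][0])   # 'unit_with_power -> UNIT'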
|
ee75071fbe950429f7f93245d709ca7d908415670651d8a99e9158fa7cc52f13 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles a "generic" string format for units
"""
import os
import re
import warnings
from . import core, utils
from .base import Base
from ...utils import classproperty
from ...utils.misc import did_you_mean
def _to_string(cls, unit):
if isinstance(unit, core.CompositeUnit):
parts = []
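        # Assembles e.g. '1e-07 J / (m2 s)': the scale (when shown), then
        # the positive powers (or a bare '1'), then '/' and the negative
        # powers, parenthesized when there is more than one.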
if cls._show_scale and unit.scale != 1:
parts.append('{0:g}'.format(unit.scale))
if len(unit.bases):
positives, negatives = utils.get_grouped_by_powers(
unit.bases, unit.powers)
if len(positives):
parts.append(cls._format_unit_list(positives))
elif len(parts) == 0:
parts.append('1')
if len(negatives):
parts.append('/')
unit_list = cls._format_unit_list(negatives)
if len(negatives) == 1:
parts.append('{0}'.format(unit_list))
else:
parts.append('({0})'.format(unit_list))
return ' '.join(parts)
elif isinstance(unit, core.NamedUnit):
return cls._get_unit_name(unit)
class Generic(Base):
"""
A "generic" format.
The syntax of the format is based directly on the FITS standard,
but instead of only supporting the units that FITS knows about, it
supports any unit available in the `astropy.units` namespace.
"""
_show_scale = True
_tokens = (
'DOUBLE_STAR',
'STAR',
'PERIOD',
'SOLIDUS',
'CARET',
'OPEN_PAREN',
'CLOSE_PAREN',
'FUNCNAME',
'UNIT',
'SIGN',
'UINT',
'UFLOAT'
)
@classproperty(lazy=True)
def _all_units(cls):
return cls._generate_unit_names()
@classproperty(lazy=True)
def _units(cls):
return cls._all_units[0]
@classproperty(lazy=True)
def _deprecated_units(cls):
return cls._all_units[1]
@classproperty(lazy=True)
def _functions(cls):
return cls._all_units[2]
@classproperty(lazy=True)
def _parser(cls):
return cls._make_parser()
@classproperty(lazy=True)
def _lexer(cls):
return cls._make_lexer()
@classmethod
def _make_lexer(cls):
from ...extern.ply import lex
tokens = cls._tokens
t_STAR = r'\*'
t_PERIOD = r'\.'
t_SOLIDUS = r'/'
t_DOUBLE_STAR = r'\*\*'
t_CARET = r'\^'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+)?'
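            # The float pattern above also matches bare integers; retype
            # those as UINT so the grammar sees '10' as an integer, '10.5'
            # as a float, and '10.' as the integer 10.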
if not re.search(r'[eE\.]', t.value):
t.type = 'UINT'
t.value = int(t.value)
elif t.value.endswith('.'):
t.type = 'UINT'
t.value = int(t.value[:-1])
else:
t.value = float(t.value)
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+-](?=\d)'
t.value = float(t.value + '1')
return t
# This needs to be a function so we can force it to happen
# before t_UNIT
def t_FUNCNAME(t):
r'((sqrt)|(ln)|(exp)|(log)|(mag)|(dB)|(dex))(?=\ *\()'
return t
def t_UNIT(t):
r"%|([YZEPTGMkhdcmunpfazy]?'((?!\d)\w)+')|((?!\d)\w)+"
t.value = cls._get_unit(t)
return t
t_ignore = ' '
# Error handling rule
def t_error(t):
raise ValueError(
"Invalid character at col {0}".format(t.lexpos))
lexer_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'generic_lextab.py'))
lexer = lex.lex(optimize=True, lextab='generic_lextab',
outputdir=os.path.dirname(__file__),
reflags=int(re.UNICODE))
if not lexer_exists:
cls._add_tab_header('generic_lextab')
return lexer
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the `FITS
standard
<http://fits.gsfc.nasa.gov/standard30/fits_standard30aa.pdf>`_,
Section 4.3, which is not terribly precise. The exact grammar
        here is based on the YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
This same grammar is used by the `"fits"` and `"vounit"`
formats, the only difference being the set of available unit
strings.
"""
from ...extern.ply import yacc
tokens = cls._tokens
def p_main(p):
'''
main : product_of_units
| factor product_of_units
| factor product product_of_units
| division_product_of_units
| factor division_product_of_units
| factor product division_product_of_units
| inverse_unit
| factor inverse_unit
| factor product inverse_unit
| factor
'''
from ..core import Unit
if len(p) == 2:
p[0] = Unit(p[1])
elif len(p) == 3:
p[0] = Unit(p[1] * p[2])
elif len(p) == 4:
p[0] = Unit(p[1] * p[3])
def p_division_product_of_units(p):
'''
division_product_of_units : division_product_of_units division product_of_units
| product_of_units
'''
from ..core import Unit
if len(p) == 4:
p[0] = Unit(p[1] / p[3])
else:
p[0] = p[1]
def p_inverse_unit(p):
'''
inverse_unit : division unit_expression
'''
p[0] = p[2] ** -1
def p_factor(p):
'''
factor : factor_fits
| factor_float
| factor_int
'''
p[0] = p[1]
def p_factor_float(p):
'''
factor_float : signed_float
| signed_float UINT signed_int
| signed_float UINT power numeric_power
'''
if cls.name == 'fits':
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 4:
p[0] = p[1] * p[2] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** float(p[4])
elif len(p) == 2:
p[0] = p[1]
def p_factor_int(p):
'''
factor_int : UINT
| UINT signed_int
| UINT power numeric_power
| UINT UINT signed_int
| UINT UINT power numeric_power
'''
if cls.name == 'fits':
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** float(p[2])
elif len(p) == 4:
if isinstance(p[2], int):
p[0] = p[1] * p[2] ** float(p[3])
else:
p[0] = p[1] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** p[4]
def p_factor_fits(p):
'''
factor_fits : UINT power OPEN_PAREN signed_int CLOSE_PAREN
| UINT power signed_int
| UINT SIGN UINT
| UINT OPEN_PAREN signed_int CLOSE_PAREN
'''
if p[1] != 10:
if cls.name == 'fits':
raise ValueError("Base must be 10")
else:
return
if len(p) == 4:
if p[2] in ('**', '^'):
p[0] = 10 ** p[3]
else:
p[0] = 10 ** (p[2] * p[3])
elif len(p) == 5:
p[0] = 10 ** p[3]
elif len(p) == 6:
p[0] = 10 ** p[4]
def p_product_of_units(p):
'''
product_of_units : unit_expression product product_of_units
| unit_expression product_of_units
| unit_expression
'''
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1] * p[3]
def p_unit_expression(p):
'''
unit_expression : function
| unit_with_power
| OPEN_PAREN product_of_units CLOSE_PAREN
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_unit_with_power(p):
'''
unit_with_power : UNIT power numeric_power
| UNIT numeric_power
| UNIT
'''
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** p[2]
else:
p[0] = p[1] ** p[3]
def p_numeric_power(p):
'''
numeric_power : sign UINT
| OPEN_PAREN paren_expr CLOSE_PAREN
'''
if len(p) == 3:
p[0] = p[1] * p[2]
elif len(p) == 4:
p[0] = p[2]
def p_paren_expr(p):
'''
paren_expr : sign UINT
| signed_float
| frac
'''
if len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1]
def p_frac(p):
'''
frac : sign UINT division sign UINT
'''
p[0] = (p[1] * p[2]) / (p[4] * p[5])
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_product(p):
'''
product : STAR
| PERIOD
'''
pass
def p_division(p):
'''
division : SOLIDUS
'''
pass
def p_power(p):
'''
power : DOUBLE_STAR
| CARET
'''
p[0] = p[1]
def p_signed_int(p):
'''
signed_int : SIGN UINT
'''
p[0] = p[1] * p[2]
def p_signed_float(p):
'''
signed_float : sign UINT
| sign UFLOAT
'''
p[0] = p[1] * p[2]
def p_function_name(p):
'''
function_name : FUNCNAME
'''
p[0] = p[1]
def p_function(p):
'''
function : function_name OPEN_PAREN main CLOSE_PAREN
'''
if p[1] == 'sqrt':
p[0] = p[3] ** 0.5
return
elif p[1] in ('mag', 'dB', 'dex'):
function_unit = cls._parse_unit(p[1])
# In Generic, this is callable, but that does not have to
# be the case in subclasses (e.g., in VOUnit it is not).
if callable(function_unit):
p[0] = function_unit(p[3])
return
raise ValueError("'{0}' is not a recognized function".format(p[1]))
def p_error(p):
raise ValueError()
parser_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'generic_parsetab.py'))
parser = yacc.yacc(debug=False, tabmodule='generic_parsetab',
outputdir=os.path.dirname(__file__))
if not parser_exists:
cls._add_tab_header('generic_parsetab')
return parser
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
raise ValueError(
"At col {0}, {1}".format(
t.lexpos, str(e)))
@classmethod
def _parse_unit(cls, s, detailed_exception=True):
registry = core.get_current_unit_registry().registry
if s == '%':
return registry['percent']
elif s in registry:
return registry[s]
if detailed_exception:
raise ValueError(
'{0} is not a valid unit. {1}'.format(
s, did_you_mean(s, registry)))
else:
raise ValueError()
@classmethod
def parse(cls, s, debug=False):
if not isinstance(s, str):
s = s.decode('ascii')
result = cls._do_parse(s, debug=debug)
if s.count('/') > 1:
warnings.warn(
"'{0}' contains multiple slashes, which is "
"discouraged by the FITS standard".format(s),
core.UnitsWarning)
return result
@classmethod
def _do_parse(cls, s, debug=False):
try:
# This is a short circuit for the case where the string
# is just a single unit name
return cls._parse_unit(s, detailed_exception=False)
except ValueError as e:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if str(e):
raise
else:
raise ValueError(
"Syntax error parsing unit '{0}'".format(s))
@classmethod
def _get_unit_name(cls, unit):
return unit.get_format_name('generic')
@classmethod
def _format_unit_list(cls, units):
out = []
units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
power = utils.format_power(power)
if '/' in power:
out.append('{0}({1})'.format(
cls._get_unit_name(base), power))
else:
out.append('{0}{1}'.format(
cls._get_unit_name(base), power))
return ' '.join(out)
@classmethod
def to_string(cls, unit):
return _to_string(cls, unit)
class Unscaled(Generic):
"""
A format that doesn't display the scale part of the unit, other
than that, it is identical to the `Generic` format.
This is used in some error messages where the scale is irrelevant.
"""
_show_scale = False
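if __name__ == '__main__':
    # Minimal sketch (assumes astropy is importable): the generic grammar
    # handles products, fractional powers, and function units alike.
    from astropy import units as u
    print(u.Unit('erg / (cm2 s)'))
    print(u.Unit('m**(3/2)'))
    print(u.Unit('mag(ct/s)'))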
|
948d6256b7ad737c68bacc63d7807099349abeff9ee80ae8bdd7d31e6bd16912 | # coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Separate tests specifically for equivalencies."""
# THIRD-PARTY
import warnings
import pytest
import numpy as np
from numpy.testing import assert_allclose
# LOCAL
from ... import units as u
from ... import constants
from ...tests.helper import assert_quantity_allclose
def test_dimensionless_angles():
    # test that the dimensionless_angles equivalency allows one to remove
    # any power of radian from the unit (#1161)
rad1 = u.dimensionless_angles()
assert u.radian.to(1, equivalencies=rad1) == 1.
assert u.deg.to(1, equivalencies=rad1) == u.deg.to(u.rad)
assert u.steradian.to(1, equivalencies=rad1) == 1.
assert u.dimensionless_unscaled.to(u.steradian, equivalencies=rad1) == 1.
# now quantities
assert (1.*u.radian).to_value(1, equivalencies=rad1) == 1.
assert (1.*u.deg).to_value(1, equivalencies=rad1) == u.deg.to(u.rad)
assert (1.*u.steradian).to_value(1, equivalencies=rad1) == 1.
# more complicated example
I = 1.e45 * u.g * u.cm**2
Omega = u.cycle / (1.*u.s)
Erot = 0.5 * I * Omega**2
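    # Omega carries cycle (= 2 pi rad), so Erot has a stray rad**2 that
    # dimensionless_angles must strip before the conversion to erg works.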
# check that equivalency makes this work
Erot_in_erg1 = Erot.to(u.erg, equivalencies=rad1)
# and check that value is correct
assert_allclose(Erot_in_erg1.value, (Erot/u.radian**2).to_value(u.erg))
    # test built-in equivalency in subclass
class MyRad1(u.Quantity):
_equivalencies = rad1
phase = MyRad1(1., u.cycle)
assert phase.to_value(1) == u.cycle.to(u.radian)
@pytest.mark.parametrize('log_unit', (u.mag, u.dex, u.dB))
def test_logarithmic(log_unit):
# check conversion of mag, dB, and dex to dimensionless and vice versa
with pytest.raises(u.UnitsError):
log_unit.to(1, 0.)
with pytest.raises(u.UnitsError):
u.dimensionless_unscaled.to(log_unit)
assert log_unit.to(1, 0., equivalencies=u.logarithmic()) == 1.
assert u.dimensionless_unscaled.to(log_unit,
equivalencies=u.logarithmic()) == 0.
# also try with quantities
q_dex = np.array([0., -1., 1., 2.]) * u.dex
q_expected = 10.**q_dex.value * u.dimensionless_unscaled
q_log_unit = q_dex.to(log_unit)
assert np.all(q_log_unit.to(1, equivalencies=u.logarithmic()) ==
q_expected)
assert np.all(q_expected.to(log_unit, equivalencies=u.logarithmic()) ==
q_log_unit)
with u.set_enabled_equivalencies(u.logarithmic()):
assert np.all(np.abs(q_log_unit - q_expected.to(log_unit)) <
1.e-10*log_unit)
doppler_functions = [u.doppler_optical, u.doppler_radio, u.doppler_relativistic]
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_frequency_0(function):
rest = 105.01 * u.GHz
velo0 = rest.to(u.km/u.s, equivalencies=function(rest))
assert velo0.value == 0
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_wavelength_0(function):
rest = 105.01 * u.GHz
q1 = 0.00285489437196 * u.m
velo0 = q1.to(u.km/u.s, equivalencies=function(rest))
np.testing.assert_almost_equal(velo0.value, 0, decimal=6)
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_energy_0(function):
rest = 105.01 * u.GHz
q1 = 0.0004342864612223407 * u.eV
velo0 = q1.to(u.km/u.s, equivalencies=function(rest))
np.testing.assert_almost_equal(velo0.value, 0, decimal=6)
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_frequency_circle(function):
rest = 105.01 * u.GHz
shifted = 105.03 * u.GHz
velo = shifted.to(u.km/u.s, equivalencies=function(rest))
freq = velo.to(u.GHz, equivalencies=function(rest))
np.testing.assert_almost_equal(freq.value, shifted.value, decimal=7)
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_wavelength_circle(function):
rest = 105.01 * u.nm
shifted = 105.03 * u.nm
velo = shifted.to(u.km / u.s, equivalencies=function(rest))
wav = velo.to(u.nm, equivalencies=function(rest))
np.testing.assert_almost_equal(wav.value, shifted.value, decimal=7)
@pytest.mark.parametrize(('function'), doppler_functions)
def test_doppler_energy_circle(function):
rest = 1.0501 * u.eV
shifted = 1.0503 * u.eV
velo = shifted.to(u.km / u.s, equivalencies=function(rest))
en = velo.to(u.eV, equivalencies=function(rest))
np.testing.assert_almost_equal(en.value, shifted.value, decimal=7)
values_ghz = (999.899940784289, 999.8999307714406, 999.8999357778647)
@pytest.mark.parametrize(('function', 'value'),
list(zip(doppler_functions, values_ghz)))
def test_30kms(function, value):
rest = 1000 * u.GHz
velo = 30 * u.km/u.s
shifted = velo.to(u.GHz, equivalencies=function(rest))
np.testing.assert_almost_equal(shifted.value, value, decimal=7)
bad_values = (5, 5*u.Jy, None)
@pytest.mark.parametrize(('function', 'value'),
list(zip(doppler_functions, bad_values)))
def test_bad_restfreqs(function, value):
with pytest.raises(u.UnitsError):
function(value)
def test_massenergy():
# The relative tolerance of these tests is set by the uncertainties
# in the charge of the electron, which is known to about
# 3e-9 (relative tolerance). Therefore, we limit the
# precision of the tests to 1e-7 to be safe. The masses are
# (loosely) known to ~ 5e-8 rel tolerance, so we couldn't test to
# 1e-7 if we used the values from astropy.constants; that is,
# they might change by more than 1e-7 in some future update, so instead
# they are hardwired here.
# Electron, proton, neutron, muon, 1g
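    # E = m c**2: 1 g corresponds to ~8.988e13 J ~= 5.6096e32 eV, which is
    # the last entry of each array below.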
mass_eV = u.Quantity([510.998928e3, 938.272046e6, 939.565378e6,
105.6583715e6, 5.60958884539e32], u.eV)
mass_g = u.Quantity([9.10938291e-28, 1.672621777e-24, 1.674927351e-24,
1.88353147e-25, 1], u.g)
# Test both ways
assert np.allclose(mass_eV.to_value(u.g, equivalencies=u.mass_energy()),
mass_g.value, rtol=1e-7)
assert np.allclose(mass_g.to_value(u.eV, equivalencies=u.mass_energy()),
mass_eV.value, rtol=1e-7)
# Basic tests of 'derived' equivalencies
# Surface density
sdens_eV = u.Quantity(5.60958884539e32, u.eV / u.m**2)
sdens_g = u.Quantity(1e-4, u.g / u.cm**2)
assert np.allclose(sdens_eV.to_value(u.g / u.cm**2,
equivalencies=u.mass_energy()),
sdens_g.value, rtol=1e-7)
assert np.allclose(sdens_g.to_value(u.eV / u.m**2,
equivalencies=u.mass_energy()),
sdens_eV.value, rtol=1e-7)
# Density
dens_eV = u.Quantity(5.60958884539e32, u.eV / u.m**3)
dens_g = u.Quantity(1e-6, u.g / u.cm**3)
assert np.allclose(dens_eV.to_value(u.g / u.cm**3,
equivalencies=u.mass_energy()),
dens_g.value, rtol=1e-7)
assert np.allclose(dens_g.to_value(u.eV / u.m**3,
equivalencies=u.mass_energy()),
dens_eV.value, rtol=1e-7)
# Power
pow_eV = u.Quantity(5.60958884539e32, u.eV / u.s)
pow_g = u.Quantity(1, u.g / u.s)
assert np.allclose(pow_eV.to_value(u.g / u.s,
equivalencies=u.mass_energy()),
pow_g.value, rtol=1e-7)
assert np.allclose(pow_g.to_value(u.eV / u.s,
equivalencies=u.mass_energy()),
pow_eV.value, rtol=1e-7)
def test_is_equivalent():
assert u.m.is_equivalent(u.pc)
assert u.cycle.is_equivalent(u.mas)
assert not u.cycle.is_equivalent(u.dimensionless_unscaled)
assert u.cycle.is_equivalent(u.dimensionless_unscaled,
u.dimensionless_angles())
assert not (u.Hz.is_equivalent(u.J))
assert u.Hz.is_equivalent(u.J, u.spectral())
assert u.J.is_equivalent(u.Hz, u.spectral())
assert u.pc.is_equivalent(u.arcsecond, u.parallax())
assert u.arcminute.is_equivalent(u.au, u.parallax())
# Pass a tuple for multiple possibilities
assert u.cm.is_equivalent((u.m, u.s, u.kg))
assert u.ms.is_equivalent((u.m, u.s, u.kg))
assert u.g.is_equivalent((u.m, u.s, u.kg))
assert not u.L.is_equivalent((u.m, u.s, u.kg))
assert not (u.km / u.s).is_equivalent((u.m, u.s, u.kg))
def test_parallax():
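    # The parallax equivalency encodes d [pc] = 1 / p [arcsec], so a
    # parallax of 10 arcsec corresponds to a distance of 0.1 pc.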
a = u.arcsecond.to(u.pc, 10, u.parallax())
assert_allclose(a, 0.10)
b = u.pc.to(u.arcsecond, a, u.parallax())
assert_allclose(b, 10)
a = u.arcminute.to(u.au, 1, u.parallax())
assert_allclose(a, 3437.7467916)
b = u.au.to(u.arcminute, a, u.parallax())
assert_allclose(b, 1)
def test_parallax2():
a = u.arcsecond.to(u.pc, [0.1, 2.5], u.parallax())
assert_allclose(a, [10, 0.4])
def test_spectral():
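    # The spectral equivalency relates wavelength and frequency through
    # lambda * nu = c, so 1 Angstrom maps to c / 1e-10 m ~= 3e18 Hz.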
a = u.AA.to(u.Hz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e+18)
b = u.Hz.to(u.AA, a, u.spectral())
assert_allclose(b, 1)
a = u.AA.to(u.MHz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e+12)
b = u.MHz.to(u.AA, a, u.spectral())
assert_allclose(b, 1)
a = u.m.to(u.Hz, 1, u.spectral())
assert_allclose(a, 2.9979245799999995e+8)
b = u.Hz.to(u.m, a, u.spectral())
assert_allclose(b, 1)
def test_spectral2():
a = u.nm.to(u.J, 500, u.spectral())
assert_allclose(a, 3.972891366538605e-19)
b = u.J.to(u.nm, a, u.spectral())
assert_allclose(b, 500)
a = u.AA.to(u.Hz, 1, u.spectral())
b = u.Hz.to(u.J, a, u.spectral())
c = u.AA.to(u.J, 1, u.spectral())
assert_allclose(b, c)
c = u.J.to(u.Hz, b, u.spectral())
assert_allclose(a, c)
def test_spectral3():
a = u.nm.to(u.Hz, [1000, 2000], u.spectral())
assert_allclose(a, [2.99792458e+14, 1.49896229e+14])
@pytest.mark.parametrize(
('in_val', 'in_unit'),
[([0.1, 5000.0, 10000.0], u.AA),
([1e+5, 2.0, 1.0], u.micron ** -1),
([2.99792458e+19, 5.99584916e+14, 2.99792458e+14], u.Hz),
([1.98644568e-14, 3.97289137e-19, 1.98644568e-19], u.J)])
def test_spectral4(in_val, in_unit):
"""Wave number conversion w.r.t. wavelength, freq, and energy."""
# Spectroscopic and angular
out_units = [u.micron ** -1, u.radian / u.micron]
answers = [[1e+5, 2.0, 1.0], [6.28318531e+05, 12.5663706, 6.28318531]]
for out_unit, ans in zip(out_units, answers):
# Forward
a = in_unit.to(out_unit, in_val, u.spectral())
assert_allclose(a, ans)
# Backward
b = out_unit.to(in_unit, ans, u.spectral())
assert_allclose(b, in_val)
def test_spectraldensity2():
# flux density
flambda = u.erg / u.angstrom / u.cm ** 2 / u.s
fnu = u.erg / u.Hz / u.cm ** 2 / u.s
a = flambda.to(fnu, 1, u.spectral_density(u.Quantity(3500, u.AA)))
assert_allclose(a, 4.086160166177361e-12)
# luminosity density
llambda = u.erg / u.angstrom / u.s
lnu = u.erg / u.Hz / u.s
a = llambda.to(lnu, 1, u.spectral_density(u.Quantity(3500, u.AA)))
assert_allclose(a, 4.086160166177361e-12)
a = lnu.to(llambda, 1, u.spectral_density(u.Quantity(3500, u.AA)))
assert_allclose(a, 2.44728537142857e11)
def test_spectraldensity3():
# Define F_nu in Jy
f_nu = u.Jy
# Define F_lambda in ergs / cm^2 / s / micron
f_lambda = u.erg / u.cm ** 2 / u.s / u.micron
# 1 GHz
one_ghz = u.Quantity(1, u.GHz)
# Convert to ergs / cm^2 / s / Hz
assert_allclose(f_nu.to(u.erg / u.cm ** 2 / u.s / u.Hz, 1.), 1.e-23, 10)
# Convert to ergs / cm^2 / s at 10 Ghz
assert_allclose(f_nu.to(u.erg / u.cm ** 2 / u.s, 1.,
equivalencies=u.spectral_density(one_ghz * 10)),
1.e-13, 10)
# Convert to F_lambda at 1 Ghz
assert_allclose(f_nu.to(f_lambda, 1.,
equivalencies=u.spectral_density(one_ghz)),
3.335640951981521e-20, 10)
# Convert to Jy at 1 Ghz
assert_allclose(f_lambda.to(u.Jy, 1.,
equivalencies=u.spectral_density(one_ghz)),
1. / 3.335640951981521e-20, 10)
# Convert to ergs / cm^2 / s at 10 microns
assert_allclose(f_lambda.to(u.erg / u.cm ** 2 / u.s, 1.,
equivalencies=u.spectral_density(u.Quantity(10, u.micron))),
10., 10)
def test_spectraldensity4():
"""PHOTLAM and PHOTNU conversions."""
flam = u.erg / (u.cm ** 2 * u.s * u.AA)
fnu = u.erg / (u.cm ** 2 * u.s * u.Hz)
photlam = u.photon / (u.cm ** 2 * u.s * u.AA)
photnu = u.photon / (u.cm ** 2 * u.s * u.Hz)
wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
flux_photlam = [9.7654e-3, 1.003896e-2, 9.78473e-3]
flux_photnu = [8.00335589e-14, 8.23668949e-14, 8.03700310e-14]
flux_flam = [3.9135e-14, 4.0209e-14, 3.9169e-14]
flux_fnu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
flux_jy = [3.20735792e-2, 3.29903646e-2, 3.21727226e-2]
flux_stmag = [12.41858665, 12.38919182, 12.41764379]
flux_abmag = [12.63463143, 12.60403221, 12.63128047]
# PHOTLAM <--> FLAM
assert_allclose(photlam.to(
flam, flux_photlam, u.spectral_density(wave)), flux_flam, rtol=1e-6)
assert_allclose(flam.to(
photlam, flux_flam, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTLAM <--> FNU
assert_allclose(photlam.to(
fnu, flux_photlam, u.spectral_density(wave)), flux_fnu, rtol=1e-6)
assert_allclose(fnu.to(
photlam, flux_fnu, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTLAM <--> Jy
assert_allclose(photlam.to(
u.Jy, flux_photlam, u.spectral_density(wave)), flux_jy, rtol=1e-6)
assert_allclose(u.Jy.to(
photlam, flux_jy, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTLAM <--> PHOTNU
assert_allclose(photlam.to(
photnu, flux_photlam, u.spectral_density(wave)), flux_photnu, rtol=1e-6)
assert_allclose(photnu.to(
photlam, flux_photnu, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTNU <--> FNU
assert_allclose(photnu.to(
fnu, flux_photnu, u.spectral_density(wave)), flux_fnu, rtol=1e-6)
assert_allclose(fnu.to(
photnu, flux_fnu, u.spectral_density(wave)), flux_photnu, rtol=1e-6)
# PHOTNU <--> FLAM
assert_allclose(photnu.to(
flam, flux_photnu, u.spectral_density(wave)), flux_flam, rtol=1e-6)
assert_allclose(flam.to(
photnu, flux_flam, u.spectral_density(wave)), flux_photnu, rtol=1e-6)
# PHOTLAM <--> STMAG
assert_allclose(photlam.to(
u.STmag, flux_photlam, u.spectral_density(wave)), flux_stmag, rtol=1e-6)
assert_allclose(u.STmag.to(
photlam, flux_stmag, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
# PHOTLAM <--> ABMAG
assert_allclose(photlam.to(
u.ABmag, flux_photlam, u.spectral_density(wave)), flux_abmag, rtol=1e-6)
assert_allclose(u.ABmag.to(
photlam, flux_abmag, u.spectral_density(wave)), flux_photlam, rtol=1e-6)
def test_spectraldensity5():
""" Test photon luminosity density conversions. """
L_la = u.erg / (u.s * u.AA)
L_nu = u.erg / (u.s * u.Hz)
phot_L_la = u.photon / (u.s * u.AA)
phot_L_nu = u.photon / (u.s * u.Hz)
wave = u.Quantity([4956.8, 4959.55, 4962.3], u.AA)
flux_phot_L_la = [9.7654e-3, 1.003896e-2, 9.78473e-3]
flux_phot_L_nu = [8.00335589e-14, 8.23668949e-14, 8.03700310e-14]
flux_L_la = [3.9135e-14, 4.0209e-14, 3.9169e-14]
flux_L_nu = [3.20735792e-25, 3.29903646e-25, 3.21727226e-25]
# PHOTLAM <--> FLAM
assert_allclose(phot_L_la.to(
L_la, flux_phot_L_la, u.spectral_density(wave)), flux_L_la, rtol=1e-6)
assert_allclose(L_la.to(
phot_L_la, flux_L_la, u.spectral_density(wave)), flux_phot_L_la, rtol=1e-6)
# PHOTLAM <--> FNU
assert_allclose(phot_L_la.to(
L_nu, flux_phot_L_la, u.spectral_density(wave)), flux_L_nu, rtol=1e-6)
assert_allclose(L_nu.to(
phot_L_la, flux_L_nu, u.spectral_density(wave)), flux_phot_L_la, rtol=1e-6)
# PHOTLAM <--> PHOTNU
assert_allclose(phot_L_la.to(
phot_L_nu, flux_phot_L_la, u.spectral_density(wave)), flux_phot_L_nu, rtol=1e-6)
assert_allclose(phot_L_nu.to(
phot_L_la, flux_phot_L_nu, u.spectral_density(wave)), flux_phot_L_la, rtol=1e-6)
# PHOTNU <--> FNU
assert_allclose(phot_L_nu.to(
L_nu, flux_phot_L_nu, u.spectral_density(wave)), flux_L_nu, rtol=1e-6)
assert_allclose(L_nu.to(
phot_L_nu, flux_L_nu, u.spectral_density(wave)), flux_phot_L_nu, rtol=1e-6)
# PHOTNU <--> FLAM
assert_allclose(phot_L_nu.to(
L_la, flux_phot_L_nu, u.spectral_density(wave)), flux_L_la, rtol=1e-6)
assert_allclose(L_la.to(
phot_L_nu, flux_L_la, u.spectral_density(wave)), flux_phot_L_nu, rtol=1e-6)
def test_equivalent_units():
from .. import imperial
with u.add_enabled_units(imperial):
units = u.g.find_equivalent_units()
units_set = set(units)
match = set(
[u.M_e, u.M_p, u.g, u.kg, u.solMass, u.t, u.u, u.M_earth,
u.M_jup, imperial.oz, imperial.lb, imperial.st, imperial.ton,
imperial.slug])
assert units_set == match
r = repr(units)
assert r.count('\n') == len(units) + 2
def test_equivalent_units2():
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = set(
[u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr,
u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
u.jupiterRad])
assert units == match
from .. import imperial
with u.add_enabled_units(imperial):
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = set(
[u.AU, u.Angstrom, imperial.BTU, u.Hz, u.J, u.Ry,
imperial.cal, u.cm, u.eV, u.erg, imperial.ft, imperial.fur,
imperial.inch, imperial.kcal, u.lyr, u.m, imperial.mi,
imperial.mil, u.micron, u.pc, u.solRad, imperial.yd, u.Bq, u.Ci,
imperial.nmi, u.k, u.earthRad, u.jupiterRad])
assert units == match
units = set(u.Hz.find_equivalent_units(u.spectral()))
match = set(
[u.AU, u.Angstrom, u.Hz, u.J, u.Ry, u.cm, u.eV, u.erg, u.lyr,
u.m, u.micron, u.pc, u.solRad, u.Bq, u.Ci, u.k, u.earthRad,
u.jupiterRad])
assert units == match
def test_trivial_equivalency():
assert u.m.to(u.kg, equivalencies=[(u.m, u.kg)]) == 1.0
def test_invalid_equivalency():
with pytest.raises(ValueError):
u.m.to(u.kg, equivalencies=[(u.m,)])
with pytest.raises(ValueError):
u.m.to(u.kg, equivalencies=[(u.m, 5.0)])
def test_irrelevant_equivalency():
with pytest.raises(u.UnitsError):
u.m.to(u.kg, equivalencies=[(u.m, u.l)])
def test_brightness_temperature():
omega_B = np.pi * (50 * u.arcsec) ** 2
nu = u.GHz * 5
tb = 7.052590289134352 * u.K
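    # Rayleigh-Jeans: T_B = S_nu * c**2 / (2 * k_B * nu**2 * Omega_B),
    # which gives ~7.05 K for 1 Jy with this beam and frequency.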
np.testing.assert_almost_equal(
tb.value, (1 * u.Jy).to_value(
u.K, equivalencies=u.brightness_temperature(nu, beam_area=omega_B)))
np.testing.assert_almost_equal(
1.0, tb.to_value(
u.Jy, equivalencies=u.brightness_temperature(nu, beam_area=omega_B)))
def test_swapped_args_brightness_temperature():
"""
#5173 changes the order of arguments but accepts the old (deprecated) args
"""
omega_B = np.pi * (50 * u.arcsec) ** 2
nu = u.GHz * 5
tb = 7.052590289134352 * u.K
# https://docs.pytest.org/en/latest/warnings.html#ensuring-function-triggers
with warnings.catch_warnings():
warnings.simplefilter('always')
with pytest.warns(DeprecationWarning) as warning_list:
result = (1*u.Jy).to(u.K,
equivalencies=u.brightness_temperature(omega_B,
nu))
roundtrip = result.to(u.Jy,
equivalencies=u.brightness_temperature(omega_B,
nu))
assert len(warning_list) == 2
np.testing.assert_almost_equal(tb.value, result.value)
np.testing.assert_almost_equal(roundtrip.value, 1)
def test_surfacebrightness():
sb = 50*u.MJy/u.sr
k = sb.to(u.K, u.brightness_temperature(50*u.GHz))
np.testing.assert_almost_equal(k.value, 0.650965, 5)
assert k.unit.is_equivalent(u.K)
def test_beam():
    # pick a beam area: 2 pi sigma^2 = area of a Gaussian with sigma=50 arcsec
omega_B = 2 * np.pi * (50 * u.arcsec) ** 2
new_beam = (5*u.beam).to(u.sr, u.equivalencies.beam_angular_area(omega_B))
np.testing.assert_almost_equal(omega_B.to(u.sr).value * 5, new_beam.value)
assert new_beam.unit.is_equivalent(u.sr)
# make sure that it's still consistent with 5 beams
nbeams = new_beam.to(u.beam, u.equivalencies.beam_angular_area(omega_B))
np.testing.assert_almost_equal(nbeams.value, 5)
# test inverse beam equivalency
# (this is just a sanity check that the equivalency is defined;
# it's not for testing numerical consistency)
new_inverse_beam = (5/u.beam).to(1/u.sr, u.equivalencies.beam_angular_area(omega_B))
# test practical case
# (this is by far the most important one)
flux_density = (5*u.Jy/u.beam).to(u.MJy/u.sr, u.equivalencies.beam_angular_area(omega_B))
np.testing.assert_almost_equal(flux_density.value, 13.5425483146382)
def test_equivalency_context():
with u.set_enabled_equivalencies(u.dimensionless_angles()):
phase = u.Quantity(1., u.cycle)
assert_allclose(np.exp(1j*phase), 1.)
Omega = u.cycle / (1.*u.minute)
assert_allclose(np.exp(1j*Omega*60.*u.second), 1.)
# ensure we can turn off equivalencies even within the scope
with pytest.raises(u.UnitsError):
phase.to(1, equivalencies=None)
# test the manager also works in the Quantity constructor.
q1 = u.Quantity(phase, u.dimensionless_unscaled)
assert_allclose(q1.value, u.cycle.to(u.radian))
# and also if we use a class that happens to have a unit attribute.
class MyQuantityLookalike(np.ndarray):
pass
mylookalike = np.array(1.).view(MyQuantityLookalike)
mylookalike.unit = 'cycle'
# test the manager also works in the Quantity constructor.
q2 = u.Quantity(mylookalike, u.dimensionless_unscaled)
assert_allclose(q2.value, u.cycle.to(u.radian))
with u.set_enabled_equivalencies(u.spectral()):
u.GHz.to(u.cm)
eq_on = u.GHz.find_equivalent_units()
with pytest.raises(u.UnitsError):
u.GHz.to(u.cm, equivalencies=None)
# without equivalencies, we should find a smaller (sub)set
eq_off = u.GHz.find_equivalent_units()
assert all(eq in set(eq_on) for eq in eq_off)
assert set(eq_off) < set(eq_on)
# Check the equivalency manager also works in ufunc evaluations,
# not just using (wrong) scaling. [#2496]
l2v = u.doppler_optical(6000 * u.angstrom)
l1 = 6010 * u.angstrom
assert l1.to(u.km/u.s, equivalencies=l2v) > 100. * u.km / u.s
with u.set_enabled_equivalencies(l2v):
assert l1 > 100. * u.km / u.s
assert abs((l1 - 500. * u.km / u.s).to(u.angstrom)) < 1. * u.km/u.s
def test_equivalency_context_manager():
base_registry = u.get_current_unit_registry()
def just_to_from_units(equivalencies):
return [(equiv[0], equiv[1]) for equiv in equivalencies]
tf_dimensionless_angles = just_to_from_units(u.dimensionless_angles())
tf_spectral = just_to_from_units(u.spectral())
assert base_registry.equivalencies == []
with u.set_enabled_equivalencies(u.dimensionless_angles()):
new_registry = u.get_current_unit_registry()
assert (set(just_to_from_units(new_registry.equivalencies)) ==
set(tf_dimensionless_angles))
assert set(new_registry.all_units) == set(base_registry.all_units)
with u.set_enabled_equivalencies(u.spectral()):
newer_registry = u.get_current_unit_registry()
assert (set(just_to_from_units(newer_registry.equivalencies)) ==
set(tf_spectral))
assert (set(newer_registry.all_units) ==
set(base_registry.all_units))
assert (set(just_to_from_units(new_registry.equivalencies)) ==
set(tf_dimensionless_angles))
assert set(new_registry.all_units) == set(base_registry.all_units)
with u.add_enabled_equivalencies(u.spectral()):
newer_registry = u.get_current_unit_registry()
assert (set(just_to_from_units(newer_registry.equivalencies)) ==
set(tf_dimensionless_angles) | set(tf_spectral))
assert (set(newer_registry.all_units) ==
set(base_registry.all_units))
assert base_registry is u.get_current_unit_registry()
def test_temperature():
from ..imperial import deg_F
t_k = 0 * u.K
assert_allclose(t_k.to_value(u.deg_C, u.temperature()), -273.15)
assert_allclose(t_k.to_value(deg_F, u.temperature()), -459.67)
def test_temperature_energy():
x = 1000 * u.K
y = (x * constants.k_B).to(u.keV)
assert_allclose(x.to_value(u.keV, u.temperature_energy()), y.value)
assert_allclose(y.to_value(u.K, u.temperature_energy()), x.value)
def test_molar_mass_amu():
x = 1 * (u.g/u.mol)
y = 1 * u.u
assert_allclose(x.to_value(u.u, u.molar_mass_amu()), y.value)
assert_allclose(y.to_value(u.g/u.mol, u.molar_mass_amu()), x.value)
with pytest.raises(u.UnitsError):
x.to(u.u)
def test_compose_equivalencies():
x = u.Unit("arcsec").compose(units=(u.pc,), equivalencies=u.parallax())
assert x[0] == u.pc
x = u.Unit("2 arcsec").compose(units=(u.pc,), equivalencies=u.parallax())
assert x[0] == u.Unit(0.5 * u.pc)
x = u.degree.compose(equivalencies=u.dimensionless_angles())
assert u.Unit(u.degree.to(u.radian)) in x
x = (u.nm).compose(units=(u.m, u.s), equivalencies=u.doppler_optical(0.55*u.micron))
for y in x:
if y.bases == [u.m, u.s]:
assert y.powers == [1, -1]
assert_allclose(
y.scale,
u.nm.to(u.m / u.s, equivalencies=u.doppler_optical(0.55 * u.micron)))
break
else:
assert False, "Didn't find speed in compose results"
def test_pixel_scale():
pix = 75*u.pix
asec = 30*u.arcsec
pixscale = 0.4*u.arcsec/u.pix
pixscale2 = 2.5*u.pix/u.arcsec
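    # 75 pix * 0.4 arcsec / pix = 30 arcsec; the equivalency accepts the
    # scale in either orientation (arcsec / pix or pix / arcsec).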
assert_quantity_allclose(pix.to(u.arcsec, u.pixel_scale(pixscale)), asec)
assert_quantity_allclose(pix.to(u.arcmin, u.pixel_scale(pixscale)), asec)
assert_quantity_allclose(pix.to(u.arcsec, u.pixel_scale(pixscale2)), asec)
assert_quantity_allclose(pix.to(u.arcmin, u.pixel_scale(pixscale2)), asec)
assert_quantity_allclose(asec.to(u.pix, u.pixel_scale(pixscale)), pix)
assert_quantity_allclose(asec.to(u.pix, u.pixel_scale(pixscale2)), pix)
def test_plate_scale():
mm = 1.5*u.mm
asec = 30*u.arcsec
platescale = 20*u.arcsec/u.mm
platescale2 = 0.05*u.mm/u.arcsec
assert_quantity_allclose(mm.to(u.arcsec, u.plate_scale(platescale)), asec)
assert_quantity_allclose(mm.to(u.arcmin, u.plate_scale(platescale)), asec)
assert_quantity_allclose(mm.to(u.arcsec, u.plate_scale(platescale2)), asec)
assert_quantity_allclose(mm.to(u.arcmin, u.plate_scale(platescale2)), asec)
assert_quantity_allclose(asec.to(u.mm, u.plate_scale(platescale)), mm)
assert_quantity_allclose(asec.to(u.mm, u.plate_scale(platescale2)), mm)
|
739fd0d52dd0a86f0dc15ca4dc172b7fabeca80582b066f2657ee8663ec15855 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for the units.format package
"""
import pytest
from numpy.testing import assert_allclose
from ...tests.helper import catch_warnings
from ... import units as u
from ...constants import si
from .. import core
from .. import format as u_format
from ..utils import is_effectively_unity
@pytest.mark.parametrize('strings, unit', [
(["m s", "m*s", "m.s"], u.m * u.s),
(["m/s", "m*s**-1", "m /s", "m / s", "m/ s"], u.m / u.s),
(["m**2", "m2", "m**(2)", "m**+2", "m+2", "m^(+2)"], u.m ** 2),
(["m**-3", "m-3", "m^(-3)", "/m3"], u.m ** -3),
(["m**(1.5)", "m(3/2)", "m**(3/2)", "m^(3/2)"], u.m ** 1.5),
(["2.54 cm"], u.Unit(u.cm * 2.54)),
(["10+8m"], u.Unit(u.m * 1e8)),
    # This example is from the VOUnits documentation, but doesn't seem to
    # follow the unity grammar: (["3.45 10**(-4)Jy"], 3.45 * 1e-4 * u.Jy)
(["sqrt(m)"], u.m ** 0.5),
(["dB(mW)", "dB (mW)"], u.DecibelUnit(u.mW)),
(["mag"], u.mag),
(["mag(ct/s)"], u.MagUnit(u.ct / u.s)),
(["dex"], u.dex),
(["dex(cm s**-2)", "dex(cm/s2)"], u.DexUnit(u.cm / u.s**2))])
def test_unit_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.Generic.parse(s)
assert unit2 == unit
@pytest.mark.parametrize('string', ['sin( /pixel /s)', 'mag(mag)',
'dB(dB(mW))', 'dex()'])
def test_unit_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.Generic.parse(string)
@pytest.mark.parametrize('strings, unit', [
(["0.1nm"], u.AA),
(["mW/m2"], u.Unit(u.erg / u.cm ** 2 / u.s)),
(["mW/(m2)"], u.Unit(u.erg / u.cm ** 2 / u.s)),
(["km/s", "km.s-1"], u.km / u.s),
(["10pix/nm"], u.Unit(10 * u.pix / u.nm)),
(["1.5x10+11m"], u.Unit(1.5e11 * u.m)),
(["1.5×10+11m"], u.Unit(1.5e11 * u.m)),
(["m2"], u.m ** 2),
(["10+21m"], u.Unit(u.m * 1e21)),
(["2.54cm"], u.Unit(u.cm * 2.54)),
(["20%"], 0.20 * u.dimensionless_unscaled),
(["10+9"], 1.e9 * u.dimensionless_unscaled),
(["2x10-9"], 2.e-9 * u.dimensionless_unscaled),
(["---"], u.dimensionless_unscaled),
(["ma"], u.ma),
(["mAU"], u.mAU),
(["uarcmin"], u.uarcmin),
(["uarcsec"], u.uarcsec),
(["kbarn"], u.kbarn),
(["Gbit"], u.Gbit),
(["Gibit"], 2 ** 30 * u.bit),
(["kbyte"], u.kbyte),
(["mRy"], 0.001 * u.Ry),
(["mmag"], u.mmag),
(["Mpc"], u.Mpc),
(["Gyr"], u.Gyr),
(["°"], u.degree),
(["°/s"], u.degree / u.s),
(["Å"], u.AA),
(["Å/s"], u.AA / u.s),
(["\\h"], si.h)])
def test_cds_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.CDS.parse(s)
assert unit2 == unit
@pytest.mark.parametrize('string', [
'0.1 nm',
'solMass(3/2)',
'km / s',
'km s-1',
'pix0.1nm',
'pix/(0.1nm)',
'km*s',
'km**2',
'5x8+3m',
'0.1---',
'---m',
'm---',
'mag(s-1)',
'dB(mW)',
'dex(cm s-2)'])
def test_cds_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.CDS.parse(string)
# These examples are taken from the EXAMPLES section of
# https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/general/ogip_93_001/
@pytest.mark.parametrize('strings, unit', [
(["count /s", "count/s", "count s**(-1)", "count / s", "count /s "],
u.count / u.s),
(["/pixel /s", "/(pixel * s)"], (u.pixel * u.s) ** -1),
(["count /m**2 /s /eV", "count m**(-2) * s**(-1) * eV**(-1)",
"count /(m**2 * s * eV)"],
u.count * u.m ** -2 * u.s ** -1 * u.eV ** -1),
(["erg /pixel /s /GHz", "erg /s /GHz /pixel", "erg /pixel /(s * GHz)"],
u.erg / (u.s * u.GHz * u.pixel)),
(["keV**2 /yr /angstrom", "10**(10) keV**2 /yr /m"],
# Though this is given as an example, it seems to violate the rules
# of not raising scales to powers, so I'm just excluding it
# "(10**2 MeV)**2 /yr /m"
u.keV**2 / (u.yr * u.angstrom)),
(["10**(46) erg /s", "10**46 erg /s", "10**(39) J /s", "10**(39) W",
"10**(15) YW", "YJ /fs"],
10**46 * u.erg / u.s),
(["10**(-7) J /cm**2 /MeV", "10**(-9) J m**(-2) eV**(-1)",
"nJ m**(-2) eV**(-1)", "nJ /m**2 /eV"],
10 ** -7 * u.J * u.cm ** -2 * u.MeV ** -1),
(["sqrt(erg /pixel /s /GHz)", "(erg /pixel /s /GHz)**(0.5)",
"(erg /pixel /s /GHz)**(1/2)",
"erg**(0.5) pixel**(-0.5) s**(-0.5) GHz**(-0.5)"],
(u.erg * u.pixel ** -1 * u.s ** -1 * u.GHz ** -1) ** 0.5),
(["(count /s) (/pixel /s)", "(count /s) * (/pixel /s)",
"count /pixel /s**2"],
(u.count / u.s) * (1.0 / (u.pixel * u.s)))])
def test_ogip_grammar(strings, unit):
for s in strings:
print(s)
unit2 = u_format.OGIP.parse(s)
assert unit2 == unit
@pytest.mark.parametrize('string', [
'log(photon /m**2 /s /Hz)',
'sin( /pixel /s)',
'log(photon /cm**2 /s /Hz) /(sin( /pixel /s))',
'log(photon /cm**2 /s /Hz) (sin( /pixel /s))**(-1)',
'dB(mW)', 'dex(cm/s**2)'])
def test_ogip_grammar_fail(string):
with pytest.raises(ValueError):
print(string)
u_format.OGIP.parse(string)
@pytest.mark.parametrize('unit', [val for key, val in u.__dict__.items()
if (isinstance(val, core.UnitBase) and
not isinstance(val, core.PrefixUnit))])
def test_roundtrip(unit):
a = core.Unit(unit.to_string('generic'), format='generic')
b = core.Unit(unit.decompose().to_string('generic'), format='generic')
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2)
assert_allclose(b.decompose().scale, unit.decompose().scale, rtol=1e-2)
@pytest.mark.parametrize('unit', [
val for key, val in u_format.VOUnit._units.items()
if (isinstance(val, core.UnitBase) and
not isinstance(val, core.PrefixUnit))])
def test_roundtrip_vo_unit(unit):
a = core.Unit(unit.to_string('vounit'), format='vounit')
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2)
if unit not in (u.mag, u.dB):
ud = unit.decompose().to_string('vounit')
assert ' ' not in ud
b = core.Unit(ud, format='vounit')
assert_allclose(b.decompose().scale, unit.decompose().scale, rtol=1e-2)
@pytest.mark.parametrize('unit', [
val for key, val in u_format.Fits._units.items()
if (isinstance(val, core.UnitBase) and
not isinstance(val, core.PrefixUnit))])
def test_roundtrip_fits(unit):
s = unit.to_string('fits')
a = core.Unit(s, format='fits')
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2)
@pytest.mark.parametrize('unit', [
val for key, val in u_format.CDS._units.items()
if (isinstance(val, core.UnitBase) and
not isinstance(val, core.PrefixUnit))])
def test_roundtrip_cds(unit):
a = core.Unit(unit.to_string('cds'), format='cds')
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2)
try:
b = core.Unit(unit.decompose().to_string('cds'), format='cds')
    except ValueError:  # skip mag: decomposes into dex, unknown to CDS
return
assert_allclose(b.decompose().scale, unit.decompose().scale, rtol=1e-2)
@pytest.mark.parametrize('unit', [
val for key, val in u_format.OGIP._units.items()
if (isinstance(val, core.UnitBase) and
not isinstance(val, core.PrefixUnit))])
def test_roundtrip_ogip(unit):
a = core.Unit(unit.to_string('ogip'), format='ogip')
assert_allclose(a.decompose().scale, unit.decompose().scale, rtol=1e-2)
try:
b = core.Unit(unit.decompose().to_string('ogip'), format='ogip')
except ValueError: # skip mag: decomposes into dex, unknown to OGIP
return
assert_allclose(b.decompose().scale, unit.decompose().scale, rtol=1e-2)
def test_fits_units_available():
u_format.Fits._units
def test_vo_units_available():
u_format.VOUnit._units
def test_cds_units_available():
u_format.CDS._units
def test_cds_non_ascii_unit():
"""Regression test for #5350. This failed with a decoding error as
μas could not be represented in ascii."""
from .. import cds
with cds.enable():
u.radian.find_equivalent_units(include_prefix_units=True)
def test_latex():
fluxunit = u.erg / (u.cm ** 2 * u.s)
assert fluxunit.to_string('latex') == r'$\mathrm{\frac{erg}{s\,cm^{2}}}$'
def test_new_style_latex():
fluxunit = u.erg / (u.cm ** 2 * u.s)
assert "{0:latex}".format(fluxunit) == r'$\mathrm{\frac{erg}{s\,cm^{2}}}$'
def test_latex_scale():
fluxunit = u.Unit(1.e-24 * u.erg / (u.cm ** 2 * u.s * u.Hz))
latex = r'$\mathrm{1 \times 10^{-24}\,\frac{erg}{Hz\,s\,cm^{2}}}$'
assert fluxunit.to_string('latex') == latex
def test_latex_inline_scale():
fluxunit = u.Unit(1.e-24 * u.erg / (u.cm ** 2 * u.s * u.Hz))
latex_inline = (r'$\mathrm{1 \times 10^{-24}\,erg'
r'\,Hz^{-1}\,s^{-1}\,cm^{-2}}$')
assert fluxunit.to_string('latex_inline') == latex_inline
@pytest.mark.parametrize('format_spec, string', [
('generic', 'erg / (cm2 s)'),
('s', 'erg / (cm2 s)'),
('console', ' erg \n ------\n s cm^2'),
('latex', '$\\mathrm{\\frac{erg}{s\\,cm^{2}}}$'),
('latex_inline', '$\\mathrm{erg\\,s^{-1}\\,cm^{-2}}$'),
('>20s', ' erg / (cm2 s)')])
def test_format_styles(format_spec, string):
fluxunit = u.erg / (u.cm ** 2 * u.s)
assert format(fluxunit, format_spec) == string
def test_flatten_to_known():
myunit = u.def_unit("FOOBAR_One", u.erg / u.Hz)
assert myunit.to_string('fits') == 'erg Hz-1'
myunit2 = myunit * u.bit ** 3
assert myunit2.to_string('fits') == 'bit3 erg Hz-1'
def test_flatten_impossible():
myunit = u.def_unit("FOOBAR_Two")
with u.add_enabled_units(myunit), pytest.raises(ValueError):
myunit.to_string('fits')
def test_console_out():
"""
Issue #436.
"""
u.Jy.decompose().to_string('console')
def test_flexible_float():
assert u.min._represents.to_string('latex') == r'$\mathrm{60\,s}$'
def test_fraction_repr():
area = u.cm ** 2.0
assert '.' not in area.to_string('latex')
fractional = u.cm ** 2.5
assert '5/2' in fractional.to_string('latex')
assert fractional.to_string('unicode') == 'cm⁵⸍²'
def test_scale_effectively_unity():
"""Scale just off unity at machine precision level is OK.
Ensures #748 does not recur
"""
a = (3. * u.N).cgs
assert is_effectively_unity(a.unit.scale)
assert len(a.__repr__().split()) == 3
def test_percent():
"""Test that the % unit is properly recognized. Since % is a special
symbol, this goes slightly beyond the roundtripping tested above."""
assert u.Unit('%') == u.percent == u.Unit(0.01)
assert u.Unit('%', format='cds') == u.Unit(0.01)
assert u.Unit(0.01).to_string('cds') == '%'
with pytest.raises(ValueError):
u.Unit('%', format='fits')
with pytest.raises(ValueError):
u.Unit('%', format='vounit')
def test_scaled_dimensionless():
"""Test that scaled dimensionless units are properly recognized in generic
and CDS, but not in fits and vounit."""
assert u.Unit('0.1') == u.Unit(0.1) == 0.1 * u.dimensionless_unscaled
assert u.Unit('1.e-4') == u.Unit(1.e-4)
assert u.Unit('10-4', format='cds') == u.Unit(1.e-4)
assert u.Unit('10+8').to_string('cds') == '10+8'
with pytest.raises(ValueError):
u.Unit(0.15).to_string('fits')
assert u.Unit(0.1).to_string('fits') == '10**-1'
with pytest.raises(ValueError):
u.Unit(0.1).to_string('vounit')
def test_deprecated_did_you_mean_units():
try:
u.Unit('ANGSTROM', format='fits')
except ValueError as e:
assert 'Did you mean Angstrom or angstrom?' in str(e)
try:
u.Unit('crab', format='ogip')
except ValueError as e:
assert 'Crab (deprecated)' in str(e)
assert 'mCrab (deprecated)' in str(e)
try:
u.Unit('ANGSTROM', format='vounit')
except ValueError as e:
assert 'angstrom (deprecated)' in str(e)
assert '0.1nm' in str(e)
assert str(e).count('0.1nm') == 1
with catch_warnings() as w:
u.Unit('angstrom', format='vounit')
assert len(w) == 1
assert '0.1nm' in str(w[0].message)
@pytest.mark.parametrize('string', ['mag(ct/s)', 'dB(mW)', 'dex(cm s**-2)'])
def test_fits_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError):
print(string)
u_format.Fits().parse(string)
@pytest.mark.parametrize('string', ['mag(ct/s)', 'dB(mW)', 'dex(cm s**-2)'])
def test_vounit_function(string):
# Function units cannot be written, so ensure they're not parsed either.
with pytest.raises(ValueError):
print(string)
u_format.VOUnit().parse(string)
def test_vounit_binary_prefix():
    assert u.Unit('KiB', format='vounit') == u.Unit('1024 B')
    assert u.Unit('Kibyte', format='vounit') == u.Unit('1024 B')
    # Kibit is 1024 bits, not bytes.
    assert u.Unit('Kibit', format='vounit') == u.Unit('1024 bit')
with catch_warnings() as w:
u.Unit('kibibyte', format='vounit')
assert len(w) == 1
def test_vounit_unknown():
assert u.Unit('unknown', format='vounit') is None
assert u.Unit('UNKNOWN', format='vounit') is None
assert u.Unit('', format='vounit') is u.dimensionless_unscaled
def test_vounit_details():
assert u.Unit('Pa', format='vounit') is u.Pascal
# The da- prefix is not allowed, and the d- prefix is discouraged
assert u.dam.to_string('vounit') == '10m'
assert u.Unit('dam dag').to_string('vounit') == '100g m'
def test_vounit_custom():
x = u.Unit("'foo' m", format='vounit')
x_vounit = x.to_string('vounit')
assert x_vounit == "'foo' m"
x_string = x.to_string()
assert x_string == "foo m"
x = u.Unit("m'foo' m", format='vounit')
assert x.bases[1]._represents.scale == 0.001
x_vounit = x.to_string('vounit')
assert x_vounit == "m m'foo'"
x_string = x.to_string()
assert x_string == 'm mfoo'
def test_vounit_implicit_custom():
x = u.Unit("furlong/week", format="vounit")
assert x.bases[0]._represents.scale == 1e-15
assert x.bases[0]._represents.bases[0].name == 'urlong'
def test_fits_scale_factor():
with pytest.raises(ValueError):
x = u.Unit('1000 erg/s/cm**2/Angstrom', format='fits')
with pytest.raises(ValueError):
x = u.Unit('12 erg/s/cm**2/Angstrom', format='fits')
x = u.Unit('10+2 erg/s/cm**2/Angstrom', format='fits')
assert x == 100 * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == '10**2 Angstrom-1 cm-2 erg s-1'
x = u.Unit('10**(-20) erg/s/cm**2/Angstrom', format='fits')
assert x == 10**(-20) * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == '10**-20 Angstrom-1 cm-2 erg s-1'
x = u.Unit('10**-20 erg/s/cm**2/Angstrom', format='fits')
assert x == 10**(-20) * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == '10**-20 Angstrom-1 cm-2 erg s-1'
x = u.Unit('10^(-20) erg/s/cm**2/Angstrom', format='fits')
assert x == 10**(-20) * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == '10**-20 Angstrom-1 cm-2 erg s-1'
x = u.Unit('10^-20 erg/s/cm**2/Angstrom', format='fits')
assert x == 10**(-20) * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == '10**-20 Angstrom-1 cm-2 erg s-1'
x = u.Unit('10-20 erg/s/cm**2/Angstrom', format='fits')
assert x == 10**(-20) * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
assert x.to_string(format='fits') == '10**-20 Angstrom-1 cm-2 erg s-1'
x = u.Unit('10**(-20)*erg/s/cm**2/Angstrom', format='fits')
assert x == 10**(-20) * (u.erg / u.s / u.cm ** 2 / u.Angstrom)
x = u.Unit(1.2 * u.erg)
with pytest.raises(ValueError):
x.to_string(format='fits')
x = u.Unit(100.0 * u.erg)
assert x.to_string(format='fits') == '10**2 erg'
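def test_fits_scale_factor_errors():
    # Hypothetical extra check (not in the original suite), sketching the
    # same rule from the writer's side: only pure powers of ten can be
    # serialized to FITS; any other scale raises.
    with pytest.raises(ValueError):
        u.Unit(0.5 * u.erg).to_string(format='fits')
    assert u.Unit(1000.0 * u.erg).to_string(format='fits') == '10**3 erg'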
|
5519c26c508f796c3d6c19c47515eeb5e5b05ea8fccc95496466d6eb1e945b8e | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for the units package
"""
import pickle
from fractions import Fraction
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ...tests.helper import raises, catch_warnings
from ... import units as u
from ... import constants as c
from .. import utils
def test_getting_started():
"""
Corresponds to "Getting Started" section in the docs.
"""
from .. import imperial
with imperial.enable():
speed_unit = u.cm / u.s
x = speed_unit.to(imperial.mile / u.hour, 1)
assert_allclose(x, 0.02236936292054402)
speed_converter = speed_unit._get_converter("mile hour^-1")
x = speed_converter([1., 1000., 5000.])
assert_allclose(x, [2.23693629e-02, 2.23693629e+01, 1.11846815e+02])
def test_initialisation():
assert u.Unit(u.m) is u.m
ten_meter = u.Unit(10.*u.m)
assert ten_meter == u.CompositeUnit(10., [u.m], [1])
assert u.Unit(ten_meter) is ten_meter
assert u.Unit(10.*ten_meter) == u.CompositeUnit(100., [u.m], [1])
foo = u.Unit('foo', (10. * ten_meter)**2, namespace=locals())
assert foo == u.CompositeUnit(10000., [u.m], [2])
assert u.Unit('m') == u.m
assert u.Unit('') == u.dimensionless_unscaled
assert u.one == u.dimensionless_unscaled
assert u.Unit('10 m') == ten_meter
assert u.Unit(10.) == u.CompositeUnit(10., [], [])
def test_invalid_power():
x = u.m ** Fraction(1, 3)
assert isinstance(x.powers[0], Fraction)
x = u.m ** Fraction(1, 2)
assert isinstance(x.powers[0], float)
# Test the automatic conversion to a fraction
x = u.m ** (1. / 3.)
assert isinstance(x.powers[0], Fraction)
def test_invalid_compare():
assert not (u.m == u.s)
def test_convert():
assert u.h._get_converter(u.s)(1) == 3600
def test_convert_fail():
with pytest.raises(u.UnitsError):
u.cm.to(u.s, 1)
with pytest.raises(u.UnitsError):
(u.cm / u.s).to(u.m, 1)
def test_composite():
assert (u.cm / u.s * u.h)._get_converter(u.m)(1) == 36
assert u.cm * u.cm == u.cm ** 2
assert u.cm * u.cm * u.cm == u.cm ** 3
assert u.Hz.to(1000 * u.Hz, 1) == 0.001
def test_str():
assert str(u.cm) == "cm"
def test_repr():
assert repr(u.cm) == 'Unit("cm")'
def test_represents():
assert u.m.represents is u.m
assert u.km.represents.scale == 1000.
assert u.km.represents.bases == [u.m]
assert u.Ry.scale == 1.0 and u.Ry.bases == [u.Ry]
assert_allclose(u.Ry.represents.scale, 13.605692518464949)
assert u.Ry.represents.bases == [u.eV]
bla = u.def_unit('bla', namespace=locals())
assert bla.represents is bla
blabla = u.def_unit('blabla', 10 * u.hr, namespace=locals())
assert blabla.represents.scale == 10.
assert blabla.represents.bases == [u.hr]
assert blabla.decompose().scale == 10 * 3600
assert blabla.decompose().bases == [u.s]
def test_units_conversion():
assert_allclose(u.kpc.to(u.Mpc), 0.001)
assert_allclose(u.Mpc.to(u.kpc), 1000)
assert_allclose(u.yr.to(u.Myr), 1.e-6)
assert_allclose(u.AU.to(u.pc), 4.84813681e-6)
assert_allclose(u.cycle.to(u.rad), 6.283185307179586)
def test_units_manipulation():
# Just do some manipulation and check it's happy
(u.kpc * u.yr) ** Fraction(1, 3) / u.Myr
(u.AA * u.erg) ** 9
def test_decompose():
assert u.Ry == u.Ry.decompose()
def test_dimensionless_to_si():
"""
Issue #1150: Test for conversion of dimensionless quantities
to the SI system
"""
testunit = ((1.0 * u.kpc) / (1.0 * u.Mpc))
assert testunit.unit.physical_type == 'dimensionless'
assert_allclose(testunit.si, 0.001)
def test_dimensionless_to_cgs():
"""
Issue #1150: Test for conversion of dimensionless quantities
to the CGS system
"""
testunit = ((1.0 * u.m) / (1.0 * u.km))
assert testunit.unit.physical_type == 'dimensionless'
assert_allclose(testunit.cgs, 0.001)
def test_unknown_unit():
with catch_warnings(u.UnitsWarning) as warning_lines:
u.Unit("FOO", parse_strict='warn')
assert 'FOO' in str(warning_lines[0].message)
def test_multiple_solidus():
assert u.Unit("m/s/kg").to_string() == u.m / u.s / u.kg
with catch_warnings(u.UnitsWarning) as warning_lines:
assert u.Unit("m/s/kg").to_string() == u.m / (u.s * u.kg)
assert 'm/s/kg' in str(warning_lines[0].message)
assert 'discouraged' in str(warning_lines[0].message)
with pytest.raises(ValueError):
u.Unit("m/s/kg", format="vounit")
def test_unknown_unit3():
unit = u.Unit("FOO", parse_strict='silent')
assert isinstance(unit, u.UnrecognizedUnit)
assert unit.name == "FOO"
unit2 = u.Unit("FOO", parse_strict='silent')
assert unit == unit2
assert unit.is_equivalent(unit2)
unit3 = u.Unit("BAR", parse_strict='silent')
assert unit != unit3
assert not unit.is_equivalent(unit3)
with pytest.raises(ValueError):
unit._get_converter(unit3)
x = unit.to_string('latex')
y = unit2.to_string('cgs')
with pytest.raises(ValueError):
unit4 = u.Unit("BAR", parse_strict='strict')
with pytest.raises(TypeError):
unit5 = u.Unit(None)
@raises(TypeError)
def test_invalid_scale():
x = ['a', 'b', 'c'] * u.m
def test_cds_power():
unit = u.Unit("10+22/cm2", format="cds", parse_strict='silent')
assert unit.scale == 1e22
def test_register():
foo = u.def_unit("foo", u.m ** 3, namespace=locals())
assert 'foo' in locals()
with u.add_enabled_units(foo):
assert 'foo' in u.get_current_unit_registry().registry
assert 'foo' not in u.get_current_unit_registry().registry
def test_in_units():
speed_unit = u.cm / u.s
x = speed_unit.in_units(u.pc / u.hour, 1)
def test_null_unit():
assert (u.m / u.m) == u.Unit(1)
def test_unrecognized_equivalency():
assert u.m.is_equivalent('foo') is False
assert u.m.is_equivalent('pc') is True
@raises(TypeError)
def test_unit_noarg():
u.Unit()
def test_convertible_exception():
try:
u.AA.to(u.h * u.s ** 2)
except u.UnitsError as e:
assert "length" in str(e)
def test_convertible_exception2():
try:
u.m.to(u.s)
except u.UnitsError as e:
assert "length" in str(e)
@raises(TypeError)
def test_invalid_type():
class A:
pass
u.Unit(A())
def test_steradian():
"""
Issue #599
"""
assert u.sr.is_equivalent(u.rad * u.rad)
results = u.sr.compose(units=u.cgs.bases)
assert results[0].bases[0] is u.rad
results = u.sr.compose(units=u.cgs.__dict__)
assert results[0].bases[0] is u.sr
def test_decompose_bases():
"""
From issue #576
"""
from .. import cgs
from ...constants import e
d = e.esu.unit.decompose(bases=cgs.bases)
assert d._bases == [u.cm, u.g, u.s]
assert d._powers == [Fraction(3, 2), 0.5, -1]
assert d._scale == 1.0
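# For context: the electrostatic unit of charge in the Gaussian system is
# g**(1/2) cm**(3/2) s**(-1), which is exactly the bases/powers checked above.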
def test_complex_compose():
complex = u.cd * u.sr * u.Wb
composed = complex.compose()
assert set(composed[0]._bases) == set([u.lm, u.Wb])
def test_equiv_compose():
composed = u.m.compose(equivalencies=u.spectral())
assert any([u.Hz] == x.bases for x in composed)
def test_empty_compose():
with pytest.raises(u.UnitsError):
composed = u.m.compose(units=[])
def _unit_as_str(unit):
    # This function serves two purposes - it is used to sort the units to
    # test alphabetically, and it is also used to allow pytest to show the
    # unit in the [] when running the parametrized tests.
return str(unit)
# We use a set to make sure we don't have any duplicates.
COMPOSE_ROUNDTRIP = set()
for val in u.__dict__.values():
if (isinstance(val, u.UnitBase) and
not isinstance(val, u.PrefixUnit)):
COMPOSE_ROUNDTRIP.add(val)
@pytest.mark.parametrize('unit', sorted(COMPOSE_ROUNDTRIP, key=_unit_as_str), ids=_unit_as_str)
def test_compose_roundtrip(unit):
composed_list = unit.decompose().compose()
found = False
for composed in composed_list:
if len(composed.bases):
if composed.bases[0] is unit:
found = True
break
elif len(unit.bases) == 0:
found = True
break
assert found
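# A minimal standalone sketch of the roundtrip idea above (illustrative only,
# not part of the parametrized test): decomposing a named derived unit and
# recomposing it should offer the original unit among the candidates.
def _example_compose_roundtrip_newton():
    candidates = u.N.decompose().compose()  # kg m / s2 -> [Unit("N"), ...]
    return any(u.N in c.bases for c in candidates)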
# We use a set to make sure we don't have any duplicates.
COMPOSE_CGS_TO_SI = set()
for val in u.cgs.__dict__.values():
# Can't decompose Celsius
if (isinstance(val, u.UnitBase) and
not isinstance(val, u.PrefixUnit) and
val != u.cgs.deg_C):
COMPOSE_CGS_TO_SI.add(val)
@pytest.mark.parametrize('unit', sorted(COMPOSE_CGS_TO_SI, key=_unit_as_str),
ids=_unit_as_str)
def test_compose_cgs_to_si(unit):
si = unit.to_system(u.si)
    assert all(x.is_equivalent(unit) for x in si)
assert si[0] == unit.si
# We use a set to make sure we don't have any duplicates.
COMPOSE_SI_TO_CGS = set()
for val in u.si.__dict__.values():
# Can't decompose Celsius
if (isinstance(val, u.UnitBase) and
not isinstance(val, u.PrefixUnit) and
val != u.si.deg_C):
COMPOSE_SI_TO_CGS.add(val)
@pytest.mark.parametrize('unit', sorted(COMPOSE_SI_TO_CGS, key=_unit_as_str), ids=_unit_as_str)
def test_compose_si_to_cgs(unit):
# Can't convert things with Ampere to CGS without more context
try:
cgs = unit.to_system(u.cgs)
except u.UnitsError:
if u.A in unit.decompose().bases:
pass
else:
raise
else:
assert [x.is_equivalent(unit) for x in cgs]
assert cgs[0] == unit.cgs
def test_to_cgs():
assert u.Pa.to_system(u.cgs)[1]._bases[0] is u.Ba
assert u.Pa.to_system(u.cgs)[1]._scale == 10.0
def test_decompose_to_cgs():
from .. import cgs
assert u.m.decompose(bases=cgs.bases)._bases[0] is cgs.cm
def test_compose_issue_579():
unit = u.kg * u.s ** 2 / u.m
result = unit.compose(units=[u.N, u.s, u.m])
assert len(result) == 1
assert result[0]._bases == [u.s, u.N, u.m]
assert result[0]._powers == [4, 1, -2]
def test_compose_prefix_unit():
x = u.m.compose(units=(u.m,))
assert x[0].bases[0] is u.m
assert x[0].scale == 1.0
x = u.m.compose(units=[u.km], include_prefix_units=True)
assert x[0].bases[0] is u.km
assert x[0].scale == 0.001
x = u.m.compose(units=[u.km])
assert x[0].bases[0] is u.km
assert x[0].scale == 0.001
x = (u.km/u.s).compose(units=(u.pc, u.Myr))
assert x[0].bases == [u.pc, u.Myr]
assert_allclose(x[0].scale, 1.0227121650537077)
with raises(u.UnitsError):
(u.km/u.s).compose(units=(u.pc, u.Myr), include_prefix_units=False)
def test_self_compose():
unit = u.kg * u.s
assert len(unit.compose(units=[u.g, u.s])) == 1
@raises(u.UnitsError)
def test_compose_failed():
unit = u.kg
result = unit.compose(units=[u.N])
def test_compose_fractional_powers():
# Warning: with a complicated unit, this test becomes very slow;
# e.g., x = (u.kg / u.s ** 3 * u.au ** 2.5 / u.yr ** 0.5 / u.sr ** 2)
# takes 3 s
x = u.m ** 0.5 / u.yr ** 1.5
factored = x.compose()
for unit in factored:
assert x.decompose() == unit.decompose()
factored = x.compose(units=u.cgs)
for unit in factored:
assert x.decompose() == unit.decompose()
factored = x.compose(units=u.si)
for unit in factored:
assert x.decompose() == unit.decompose()
def test_compose_best_unit_first():
results = u.l.compose()
assert len(results[0].bases) == 1
assert results[0].bases[0] is u.l
results = (u.s ** -1).compose()
assert results[0].bases[0] in (u.Hz, u.Bq)
results = (u.Ry.decompose()).compose()
assert results[0].bases[0] is u.Ry
def test_compose_no_duplicates():
new = u.kg / u.s ** 3 * u.au ** 2.5 / u.yr ** 0.5 / u.sr ** 2
composed = new.compose(units=u.cgs.bases)
assert len(composed) == 1
def test_long_int():
"""
Issue #672
"""
sigma = 10 ** 21 * u.M_p / u.cm ** 2
sigma.to(u.M_sun / u.pc ** 2)
def test_endian_independence():
"""
Regression test for #744
A logic issue in the units code meant that big endian arrays could not be
converted because the dtype is '>f4', not 'float32', and the code was
looking for the strings 'float' or 'int'.
"""
for endian in ['<', '>']:
for ntype in ['i', 'f']:
for byte in ['4', '8']:
x = np.array([1, 2, 3], dtype=(endian + ntype + byte))
u.m.to(u.cm, x)
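# (The dtype strings built above combine byte order, kind and size: e.g.
# '>f4' is big-endian float32 and '<i8' is little-endian int64.)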
def test_radian_base():
"""
Issue #863
"""
assert (1 * u.degree).si.unit == u.rad
def test_no_as():
# We don't define 'as', since it is a keyword, but we
# do want to define the long form (`attosecond`).
assert not hasattr(u, 'as')
assert hasattr(u, 'attosecond')
def test_no_duplicates_in_names():
# Regression test for #5036
assert u.ct.names == ['ct', 'count']
assert u.ct.short_names == ['ct', 'count']
assert u.ct.long_names == ['count']
assert set(u.ph.names) == set(u.ph.short_names) | set(u.ph.long_names)
def test_pickling():
p = pickle.dumps(u.m)
other = pickle.loads(p)
assert other is u.m
new_unit = u.IrreducibleUnit(['foo'], format={'baz': 'bar'})
# This is local, so the unit should not be registered.
assert 'foo' not in u.get_current_unit_registry().registry
# Test pickling of this unregistered unit.
p = pickle.dumps(new_unit)
new_unit_copy = pickle.loads(p)
assert new_unit_copy.names == ['foo']
assert new_unit_copy.get_format_name('baz') == 'bar'
# It should still not be registered.
assert 'foo' not in u.get_current_unit_registry().registry
# Now try the same with a registered unit.
with u.add_enabled_units([new_unit]):
p = pickle.dumps(new_unit)
assert 'foo' in u.get_current_unit_registry().registry
# Check that a registered unit can be loaded and that it gets re-enabled.
with u.add_enabled_units([]):
assert 'foo' not in u.get_current_unit_registry().registry
new_unit_copy = pickle.loads(p)
assert new_unit_copy.names == ['foo']
assert new_unit_copy.get_format_name('baz') == 'bar'
assert 'foo' in u.get_current_unit_registry().registry
# And just to be sure, that it gets removed outside of the context.
assert 'foo' not in u.get_current_unit_registry().registry
def test_pickle_unrecognized_unit():
"""
Issue #2047
"""
a = u.Unit('asdf', parse_strict='silent')
pickle.loads(pickle.dumps(a))
@raises(ValueError)
def test_duplicate_define():
u.def_unit('m', namespace=u.__dict__)
def test_all_units():
from ...units.core import get_current_unit_registry
registry = get_current_unit_registry()
assert len(registry.all_units) > len(registry.non_prefix_units)
def test_repr_latex():
assert u.m._repr_latex_() == u.m.to_string('latex')
def test_operations_with_strings():
assert u.m / '5s' == (u.m / (5.0 * u.s))
assert u.m * '5s' == (5.0 * u.m * u.s)
def test_comparison():
assert u.m > u.cm
assert u.m >= u.cm
assert u.cm < u.m
assert u.cm <= u.m
with pytest.raises(u.UnitsError):
u.m > u.kg
def test_compose_into_arbitrary_units():
# Issue #1438
from ...constants import G
G.decompose([u.kg, u.km, u.Unit("15 s")])
def test_unit_multiplication_with_string():
"""Check that multiplication with strings produces the correct unit."""
u1 = u.cm
us = 'kg'
assert us * u1 == u.Unit(us) * u1
assert u1 * us == u1 * u.Unit(us)
def test_unit_division_by_string():
"""Check that multiplication with strings produces the correct unit."""
u1 = u.cm
us = 'kg'
assert us / u1 == u.Unit(us) / u1
assert u1 / us == u1 / u.Unit(us)
def test_sorted_bases():
"""See #1616."""
assert (u.m * u.Jy).bases == (u.Jy * u.m).bases
def test_megabit():
"""See #1543"""
assert u.Mbit is u.Mb
assert u.megabit is u.Mb
assert u.Mbyte is u.MB
assert u.megabyte is u.MB
def test_composite_unit_get_format_name():
"""See #1576"""
unit1 = u.Unit('nrad/s')
unit2 = u.Unit('Hz(1/2)')
assert (str(u.CompositeUnit(1, [unit1, unit2], [1, -1])) ==
'nrad / (Hz(1/2) s)')
def test_unicode_policy():
from ...tests.helper import assert_follows_unicode_guidelines
assert_follows_unicode_guidelines(
u.degree, roundtrip=u.__dict__)
def test_suggestions():
for search, matches in [
('microns', 'micron'),
('s/microns', 'micron'),
('M', 'm'),
('metre', 'meter'),
('angstroms', 'Angstrom or angstrom'),
('milimeter', 'millimeter'),
('ångström', 'Angstrom or angstrom'),
('kev', 'EV, eV, kV or keV')]:
try:
u.Unit(search)
except ValueError as e:
assert 'Did you mean {0}?'.format(matches) in str(e)
else:
assert False, 'Expected ValueError'
def test_fits_hst_unit():
"""See #1911."""
x = u.Unit("erg /s /cm**2 /angstrom")
assert x == u.erg * u.s ** -1 * u.cm ** -2 * u.angstrom ** -1
def test_barn_prefixes():
"""Regression test for https://github.com/astropy/astropy/issues/3753"""
assert u.fbarn is u.femtobarn
assert u.pbarn is u.picobarn
def test_fractional_powers():
"""See #2069"""
m = 1e9 * u.Msun
tH = 1. / (70. * u.km / u.s / u.Mpc)
vc = 200 * u.km/u.s
x = (c.G ** 2 * m ** 2 * tH.cgs) ** Fraction(1, 3) / vc
v1 = x.to('pc')
x = (c.G ** 2 * m ** 2 * tH) ** Fraction(1, 3) / vc
v2 = x.to('pc')
x = (c.G ** 2 * m ** 2 * tH.cgs) ** (1.0 / 3.0) / vc
v3 = x.to('pc')
x = (c.G ** 2 * m ** 2 * tH) ** (1.0 / 3.0) / vc
v4 = x.to('pc')
assert_allclose(v1, v2)
assert_allclose(v2, v3)
assert_allclose(v3, v4)
x = u.m ** (1.0 / 11.0)
assert isinstance(x.powers[0], float)
x = u.m ** (3.0 / 7.0)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0].numerator == 3
assert x.powers[0].denominator == 7
x = u.cm ** Fraction(1, 2) * u.cm ** Fraction(2, 3)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0] == Fraction(7, 6)
def test_inherit_docstrings():
assert u.UnrecognizedUnit.is_unity.__doc__ == u.UnitBase.is_unity.__doc__
def test_sqrt_mag():
sqrt_mag = u.mag ** 0.5
assert hasattr(sqrt_mag.decompose().scale, 'imag')
assert (sqrt_mag.decompose())**2 == u.mag
def test_composite_compose():
# Issue #2382
composite_unit = u.s.compose(units=[u.Unit("s")])[0]
u.s.compose(units=[composite_unit])
def test_data_quantities():
assert u.byte.is_equivalent(u.bit)
def test_compare_with_none():
# Ensure that equality comparisons with `None` work, and don't
# raise exceptions. We are deliberately not using `is None` here
# because that doesn't trigger the bug. See #3108.
assert not (u.m == None) # nopep8
assert u.m != None # nopep8
def test_validate_power_detect_fraction():
frac = utils.validate_power(1.1666666666666665)
assert isinstance(frac, Fraction)
assert frac.numerator == 7
assert frac.denominator == 6
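# For reference: 1.1666666666666665 differs from 7/6 only in the last
# floating-point digit, and the standard library recovers the same fraction:
# Fraction(1.1666666666666665).limit_denominator(10) == Fraction(7, 6).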
def test_complex_fractional_rounding_errors():
# See #3788
kappa = 0.34 * u.cm**2 / u.g
r_0 = 886221439924.7849 * u.cm
q = 1.75
rho_0 = 5e-10 * u.solMass / u.solRad**3
y = 0.5
beta = 0.19047619047619049
a = 0.47619047619047628
m_h = 1e6*u.solMass
t1 = 2 * c.c / (kappa * np.sqrt(np.pi))
t2 = (r_0**-q) / (rho_0 * y * beta * (a * c.G * m_h)**0.5)
result = ((t1 * t2)**-0.8)
assert result.unit.physical_type == 'length'
result.to(u.solRad)
def test_fractional_rounding_errors_simple():
x = (u.m ** 1.5) ** Fraction(4, 5)
assert isinstance(x.powers[0], Fraction)
assert x.powers[0].numerator == 6
assert x.powers[0].denominator == 5
def test_enable_unit_groupings():
from ...units import cds
with cds.enable():
assert cds.geoMass in u.kg.find_equivalent_units()
from ...units import imperial
with imperial.enable():
assert imperial.inch in u.m.find_equivalent_units()
def test_unit_summary_prefixes():
"""
Test for a few units that the unit summary table correctly reports
whether or not that unit supports prefixes.
Regression test for https://github.com/astropy/astropy/issues/3835
"""
from .. import astrophys
for summary in utils._iter_unit_summary(astrophys.__dict__):
unit, _, _, _, prefixes = summary
        if unit.name == 'lyr':
            assert prefixes == 'Yes'
        elif unit.name == 'pc':
            assert prefixes == 'Yes'
        elif unit.name == 'barn':
            assert prefixes == 'Yes'
elif unit.name == 'cycle':
assert prefixes == 'No'
elif unit.name == 'vox':
assert prefixes == 'Yes'
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Logarithmic Units and Quantities
"""
import pickle
import itertools
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ...tests.helper import assert_quantity_allclose
from ... import units as u, constants as c
lu_units = [u.dex, u.mag, u.decibel]
lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit]
lq_subclasses = [u.Dex, u.Magnitude, u.Decibel]
pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy)
class TestLogUnitCreation:
def test_logarithmic_units(self):
"""Check logarithmic units are set up correctly."""
assert u.dB.to(u.dex) == 0.1
assert u.dex.to(u.mag) == -2.5
assert u.mag.to(u.dB) == -4
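        # These values follow from the definitions: 1 dex is a factor of 10,
        # a bel (10 dB) is also a factor of 10, and a magnitude is a factor
        # of 10**(-0.4); hence 1 dB = 0.1 dex, 1 dex = -2.5 mag and
        # 1 mag = -4 dB.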
@pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses))
def test_callable_units(self, lu_unit, lu_cls):
assert isinstance(lu_unit, u.UnitBase)
assert callable(lu_unit)
assert lu_unit._function_unit_class is lu_cls
@pytest.mark.parametrize('lu_unit', lu_units)
def test_equality_to_normal_unit_for_dimensionless(self, lu_unit):
lu = lu_unit()
        assert lu == lu._default_function_unit  # e.g., MagUnit() == u.mag
assert lu._default_function_unit == lu # and u.mag == MagUnit()
@pytest.mark.parametrize('lu_unit, physical_unit',
itertools.product(lu_units, pu_sample))
def test_call_units(self, lu_unit, physical_unit):
"""Create a LogUnit subclass using the callable unit and physical unit,
and do basic check that output is right."""
lu1 = lu_unit(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
def test_call_invalid_unit(self):
with pytest.raises(TypeError):
u.mag([])
with pytest.raises(ValueError):
u.mag(u.mag())
@pytest.mark.parametrize('lu_cls, physical_unit', itertools.product(
lu_subclasses + [u.LogUnit], pu_sample))
def test_subclass_creation(self, lu_cls, physical_unit):
"""Create a LogUnit subclass object for given physical unit,
and do basic check that output is right."""
lu1 = lu_cls(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
lu2 = lu_cls(physical_unit,
function_unit=2*lu1._default_function_unit)
assert lu2.physical_unit == physical_unit
assert lu2.function_unit == u.Unit(2*lu2._default_function_unit)
with pytest.raises(ValueError):
lu_cls(physical_unit, u.m)
def test_predefined_magnitudes():
assert_quantity_allclose((-21.1*u.STmag).physical,
1.*u.erg/u.cm**2/u.s/u.AA)
assert_quantity_allclose((-48.6*u.ABmag).physical,
1.*u.erg/u.cm**2/u.s/u.Hz)
assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0)
assert_quantity_allclose((0*u.m_bol).physical,
c.L_bol0/(4.*np.pi*(10.*c.pc)**2))
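# The zero points above are the standard ones: m_ST = -2.5 log10(f_lambda)
# - 21.10 with f_lambda in erg/s/cm2/AA, and m_AB = -2.5 log10(f_nu) - 48.60
# with f_nu in erg/s/cm2/Hz, so a flux of exactly 1 in those units
# corresponds to -21.1 and -48.6 mag, respectively.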
def test_predefined_reinitialisation():
assert u.mag('ST') == u.STmag
assert u.mag('AB') == u.ABmag
assert u.mag('Bol') == u.M_bol
assert u.mag('bol') == u.m_bol
def test_predefined_string_roundtrip():
"""Ensure roundtripping; see #5015"""
with u.magnitude_zero_points.enable():
assert u.Unit(u.STmag.to_string()) == u.STmag
assert u.Unit(u.ABmag.to_string()) == u.ABmag
assert u.Unit(u.M_bol.to_string()) == u.M_bol
assert u.Unit(u.m_bol.to_string()) == u.m_bol
def test_inequality():
"""Check __ne__ works (regresssion for #5342)."""
lu1 = u.mag(u.Jy)
lu2 = u.dex(u.Jy)
lu3 = u.mag(u.Jy**2)
lu4 = lu3 - lu1
assert lu1 != lu2
assert lu1 != lu3
assert lu1 == lu4
class TestLogUnitStrings:
def test_str(self):
"""Do some spot checks that str, repr, etc. work as expected."""
lu1 = u.mag(u.Jy)
assert str(lu1) == 'mag(Jy)'
assert repr(lu1) == 'Unit("mag(Jy)")'
assert lu1.to_string('generic') == 'mag(Jy)'
with pytest.raises(ValueError):
lu1.to_string('fits')
lu2 = u.dex()
assert str(lu2) == 'dex'
assert repr(lu2) == 'Unit("dex(1)")'
assert lu2.to_string() == 'dex(1)'
lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag)
assert str(lu3) == '2 mag(Jy)'
assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")'
assert lu3.to_string() == '2 mag(Jy)'
lu4 = u.mag(u.ct)
assert lu4.to_string('generic') == 'mag(ct)'
assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( '
'\\mathrm{ct} \\right)}$')
assert lu4._repr_latex_() == lu4.to_string('latex')
class TestLogUnitConversion:
@pytest.mark.parametrize('lu_unit, physical_unit',
itertools.product(lu_units, pu_sample))
def test_physical_unit_conversion(self, lu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to their non-log counterparts."""
lu1 = lu_unit(physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(physical_unit, 0.) == 1.
assert physical_unit.is_equivalent(lu1)
assert physical_unit.to(lu1, 1.) == 0.
pu = u.Unit(8.*physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(pu, 0.) == 0.125
assert pu.is_equivalent(lu1)
assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15)
# Check we round-trip.
value = np.linspace(0., 10., 6)
assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15)
# And that we're not just returning True all the time.
pu2 = u.g
assert not lu1.is_equivalent(pu2)
with pytest.raises(u.UnitsError):
lu1.to(pu2)
assert not pu2.is_equivalent(lu1)
with pytest.raises(u.UnitsError):
pu2.to(lu1)
@pytest.mark.parametrize('lu_unit', lu_units)
def test_container_unit_conversion(self, lu_unit):
"""Check that conversion to logarithmic units (u.mag, u.dB, u.dex)
is only possible when the physical unit is dimensionless."""
values = np.linspace(0., 10., 6)
lu1 = lu_unit(u.dimensionless_unscaled)
assert lu1.is_equivalent(lu1.function_unit)
assert_allclose(lu1.to(lu1.function_unit, values), values)
lu2 = lu_unit(u.Jy)
assert not lu2.is_equivalent(lu2.function_unit)
with pytest.raises(u.UnitsError):
lu2.to(lu2.function_unit, values)
@pytest.mark.parametrize(
'flu_unit, tlu_unit, physical_unit',
itertools.product(lu_units, lu_units, pu_sample))
def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to each other if they correspond to equivalent physical units."""
values = np.linspace(0., 10., 6)
flu = flu_unit(physical_unit)
tlu = tlu_unit(physical_unit)
assert flu.is_equivalent(tlu)
assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit))
assert_allclose(flu.to(tlu, values),
values * flu.function_unit.to(tlu.function_unit))
tlu2 = tlu_unit(u.Unit(100.*physical_unit))
assert flu.is_equivalent(tlu2)
# Check that we round-trip.
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15)
tlu3 = tlu_unit(physical_unit.to_system(u.si)[0])
assert flu.is_equivalent(tlu3)
assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15)
tlu4 = tlu_unit(u.g)
assert not flu.is_equivalent(tlu4)
with pytest.raises(u.UnitsError):
flu.to(tlu4, values)
def test_unit_decomposition(self):
lu = u.mag(u.Jy)
assert lu.decompose() == u.mag(u.Jy.decompose())
assert lu.decompose().physical_unit.bases == [u.kg, u.s]
assert lu.si == u.mag(u.Jy.si)
assert lu.si.physical_unit.bases == [u.kg, u.s]
assert lu.cgs == u.mag(u.Jy.cgs)
assert lu.cgs.physical_unit.bases == [u.g, u.s]
def test_unit_multiple_possible_equivalencies(self):
lu = u.mag(u.Jy)
assert lu.is_equivalent(pu_sample)
class TestLogUnitArithmetic:
def test_multiplication_division(self):
"""Check that multiplication/division with other units is only
possible when the physical unit is dimensionless, and that this
turns the unit into a normal one."""
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 * u.m
with pytest.raises(u.UnitsError):
u.m * lu1
with pytest.raises(u.UnitsError):
lu1 / lu1
for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lu1 / unit
lu2 = u.mag(u.dimensionless_unscaled)
with pytest.raises(u.UnitsError):
lu2 * lu1
with pytest.raises(u.UnitsError):
lu2 / lu1
# But dimensionless_unscaled can be cancelled.
assert lu2 / lu2 == u.dimensionless_unscaled
# With dimensionless, normal units are OK, but we return a plain unit.
tf = lu2 * u.m
tr = u.m * lu2
for t in (tf, tr):
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lu2.physical_unit)
# Now we essentially have a LogUnit with a prefactor of 100,
# so should be equivalent again.
t = tf / u.cm
with u.set_enabled_equivalencies(u.logarithmic()):
assert t.is_equivalent(lu2.function_unit)
assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.),
lu2.to(lu2.physical_unit, np.arange(3.)))
# If we effectively remove lu1, a normal unit should be returned.
t2 = tf / lu2
assert not isinstance(t2, type(lu2))
assert t2 == u.m
t3 = tf / lu2.function_unit
assert not isinstance(t3, type(lu2))
assert t3 == u.m
        # For completeness, also ensure nonsensical operations fail
with pytest.raises(TypeError):
lu1 * object()
with pytest.raises(TypeError):
slice(None) * lu1
with pytest.raises(TypeError):
lu1 / []
with pytest.raises(TypeError):
1 / lu1
@pytest.mark.parametrize('power', (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogUnits to some power is only possible when the
physical unit is dimensionless, and that conversion is turned off when
the resulting logarithmic unit (such as mag**2) is incompatible."""
lu1 = u.mag(u.Jy)
if power == 0:
assert lu1 ** power == u.dimensionless_unscaled
elif power == 1:
assert lu1 ** power == lu1
else:
with pytest.raises(u.UnitsError):
lu1 ** power
# With dimensionless, though, it works, but returns a normal unit.
lu2 = u.mag(u.dimensionless_unscaled)
t = lu2**power
if power == 0:
assert t == u.dimensionless_unscaled
elif power == 1:
assert t == lu2
else:
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit**power
# also check we roundtrip
t2 = t**(1./power)
assert t2 == lu2.function_unit
with u.set_enabled_equivalencies(u.logarithmic()):
assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)),
lu2.to(lu2.physical_unit, np.arange(3.)))
@pytest.mark.parametrize('other', pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 + other
with pytest.raises(u.UnitsError):
lu1 - other
with pytest.raises(u.UnitsError):
other - lu1
def test_addition_subtraction_to_non_units_fails(self):
lu1 = u.mag(u.Jy)
with pytest.raises(TypeError):
lu1 + 1.
with pytest.raises(TypeError):
lu1 - [1., 2., 3.]
@pytest.mark.parametrize(
'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m),
u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag)))
def test_addition_subtraction(self, other):
"""Check physical units are changed appropriately"""
lu1 = u.mag(u.Jy)
other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled)
lu_sf = lu1 + other
assert lu_sf.is_equivalent(lu1.physical_unit * other_pu)
lu_sr = other + lu1
assert lu_sr.is_equivalent(lu1.physical_unit * other_pu)
lu_df = lu1 - other
assert lu_df.is_equivalent(lu1.physical_unit / other_pu)
lu_dr = other - lu1
assert lu_dr.is_equivalent(other_pu / lu1.physical_unit)
def test_complicated_addition_subtraction(self):
"""for fun, a more complicated example of addition and subtraction"""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
lu_dm = u.mag(dm0)
lu_absST = u.STmag - lu_dm
assert lu_absST.is_equivalent(u.erg/u.s/u.AA)
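        # dm0 is the flux-dilution factor at 10 pc, so subtracting a
        # distance-modulus magnitude from an apparent ST magnitude leaves a
        # unit equivalent to a luminosity density, erg/s/AA.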
def test_neg_pos(self):
lu1 = u.mag(u.Jy)
neg_lu = -lu1
assert neg_lu != lu1
assert neg_lu.physical_unit == u.Jy**-1
assert -neg_lu == lu1
pos_lu = +lu1
assert pos_lu is not lu1
assert pos_lu == lu1
def test_pickle():
lu1 = u.dex(u.cm/u.s**2)
s = pickle.dumps(lu1)
lu2 = pickle.loads(s)
assert lu1 == lu2
def test_hashable():
lu1 = u.dB(u.mW)
lu2 = u.dB(u.m)
lu3 = u.dB(u.mW)
assert hash(lu1) != hash(lu2)
assert hash(lu1) == hash(lu3)
luset = {lu1, lu2, lu3}
assert len(luset) == 2
class TestLogQuantityCreation:
@pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity],
lu_subclasses + [u.LogUnit]))
def test_logarithmic_quantities(self, lq, lu):
"""Check logarithmic quantities are all set up correctly"""
assert lq._unit_class == lu
assert type(lu()._quantity_class(1.)) is lq
@pytest.mark.parametrize('lq_cls, physical_unit',
itertools.product(lq_subclasses, pu_sample))
def test_subclass_creation(self, lq_cls, physical_unit):
"""Create LogQuantity subclass objects for some physical units,
and basic check on transformations"""
value = np.arange(1., 10.)
log_q = lq_cls(value * physical_unit)
assert log_q.unit.physical_unit == physical_unit
assert log_q.unit.function_unit == log_q.unit._default_function_unit
assert_allclose(log_q.physical.value, value)
with pytest.raises(ValueError):
lq_cls(value, physical_unit)
@pytest.mark.parametrize(
'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m),
u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag),
u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
def test_different_units(self, unit):
q = u.Magnitude(1.23, unit)
assert q.unit.function_unit == getattr(unit, 'function_unit', unit)
assert q.unit.physical_unit is getattr(unit, 'physical_unit',
u.dimensionless_unscaled)
@pytest.mark.parametrize('value, unit', (
(1.*u.mag(u.Jy), None),
(1.*u.dex(u.Jy), None),
(1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)),
(1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy))))
def test_function_values(self, value, unit):
lq = u.Magnitude(value, unit)
assert lq == value
assert lq.unit.function_unit == u.mag
assert lq.unit.physical_unit == getattr(unit, 'physical_unit',
value.unit.physical_unit)
@pytest.mark.parametrize(
'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag),
u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
def test_indirect_creation(self, unit):
q1 = 2.5 * unit
assert isinstance(q1, u.Magnitude)
assert q1.value == 2.5
assert q1.unit == unit
pv = 100. * unit.physical_unit
q2 = unit * pv
assert q2.unit == unit
assert q2.unit.physical_unit == pv.unit
assert q2.to_value(unit.physical_unit) == 100.
assert (q2._function_view / u.mag).to_value(1) == -5.
q3 = unit / 0.4
assert q3 == q1
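        # Note on the last check: dividing the unit by a number is evidently
        # read as multiplying by its inverse, so unit / 0.4 produces the same
        # Magnitude as 2.5 * unit.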
def test_from_view(self):
# Cannot view a physical quantity as a function quantity, since the
# values would change.
q = [100., 1000.] * u.cm/u.s**2
with pytest.raises(TypeError):
q.view(u.Dex)
# But fine if we have the right magnitude.
q = [2., 3.] * u.dex
lq = q.view(u.Dex)
assert isinstance(lq, u.Dex)
assert lq.unit.physical_unit == u.dimensionless_unscaled
assert np.all(q == lq)
def test_using_quantity_class(self):
"""Check that we can use Quantity if we have subok=True"""
# following issue #5851
lu = u.dex(u.AA)
with pytest.raises(u.UnitTypeError):
u.Quantity(1., lu)
q = u.Quantity(1., lu, subok=True)
assert type(q) is lu._quantity_class
def test_conversion_to_and_from_physical_quantities():
"""Ensures we can convert from regular quantities."""
mst = [10., 12., 14.] * u.STmag
flux_lambda = mst.physical
mst_roundtrip = flux_lambda.to(u.STmag)
# check we return a logquantity; see #5178.
assert isinstance(mst_roundtrip, u.Magnitude)
assert mst_roundtrip.unit == mst.unit
assert_allclose(mst_roundtrip.value, mst.value)
wave = [4956.8, 4959.55, 4962.3] * u.AA
flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave))
mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave))
assert isinstance(mst_roundtrip2, u.Magnitude)
assert mst_roundtrip2.unit == mst.unit
assert_allclose(mst_roundtrip2.value, mst.value)
def test_quantity_decomposition():
lq = 10.*u.mag(u.Jy)
assert lq.decompose() == lq
assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s]
assert lq.si == lq
assert lq.si.unit.physical_unit.bases == [u.kg, u.s]
assert lq.cgs == lq
assert lq.cgs.unit.physical_unit.bases == [u.g, u.s]
class TestLogQuantityViews:
def setup(self):
self.lq = u.Magnitude(np.arange(10.) * u.Jy)
self.lq2 = u.Magnitude(np.arange(5.))
def test_value_view(self):
lq_value = self.lq.value
assert type(lq_value) is np.ndarray
lq_value[2] = -1.
assert np.all(self.lq.value == lq_value)
def test_function_view(self):
lq_fv = self.lq._function_view
assert type(lq_fv) is u.Quantity
assert lq_fv.unit is self.lq.unit.function_unit
lq_fv[3] = -2. * lq_fv.unit
assert np.all(self.lq.value == lq_fv.value)
def test_quantity_view(self):
# Cannot view as Quantity, since the unit cannot be represented.
with pytest.raises(TypeError):
self.lq.view(u.Quantity)
# But a dimensionless one is fine.
q2 = self.lq2.view(u.Quantity)
assert q2.unit is u.mag
assert np.all(q2.value == self.lq2.value)
lq3 = q2.view(u.Magnitude)
assert type(lq3.unit) is u.MagUnit
assert lq3.unit.physical_unit == u.dimensionless_unscaled
assert np.all(lq3 == self.lq2)
class TestLogQuantitySlicing:
def test_item_get_and_set(self):
lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy)
assert lq1[9] == u.Magnitude(10.*u.Jy)
lq1[2] = 100.*u.Jy
assert lq1[2] == u.Magnitude(100.*u.Jy)
with pytest.raises(u.UnitsError):
lq1[2] = 100.*u.m
with pytest.raises(u.UnitsError):
lq1[2] = 100.*u.mag
with pytest.raises(u.UnitsError):
lq1[2] = u.Magnitude(100.*u.m)
assert lq1[2] == u.Magnitude(100.*u.Jy)
def test_slice_get_and_set(self):
lq1 = u.Magnitude(np.arange(1., 10.)*u.Jy)
lq1[2:4] = 100.*u.Jy
assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy))
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.*u.m
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.*u.mag
with pytest.raises(u.UnitsError):
lq1[2:4] = u.Magnitude(100.*u.m)
assert np.all(lq1[2] == u.Magnitude(100.*u.Jy))
class TestLogQuantityArithmetic:
def test_multiplication_division(self):
"""Check that multiplication/division with other quantities is only
possible when the physical unit is dimensionless, and that this turns
the result into a normal quantity."""
lq = u.Magnitude(np.arange(1., 11.)*u.Jy)
with pytest.raises(u.UnitsError):
lq * (1.*u.m)
with pytest.raises(u.UnitsError):
(1.*u.m) * lq
with pytest.raises(u.UnitsError):
lq / lq
for unit in (u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lq / unit
lq2 = u.Magnitude(np.arange(1, 11.))
with pytest.raises(u.UnitsError):
lq2 * lq
with pytest.raises(u.UnitsError):
lq2 / lq
with pytest.raises(u.UnitsError):
lq / lq2
# but dimensionless_unscaled can be cancelled
r = lq2 / u.Magnitude(2.)
assert r.unit == u.dimensionless_unscaled
assert np.all(r.value == lq2.value/2.)
# with dimensionless, normal units OK, but return normal quantities
tf = lq2 * u.m
tr = u.m * lq2
for t in (tf, tr):
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lq2.unit.physical_unit)
t = tf / (50.*u.cm)
# now we essentially have the same quantity but with a prefactor of 2
assert t.unit.is_equivalent(lq2.unit.function_unit)
assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2)
@pytest.mark.parametrize('power', (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogQuantities to some power is only possible when
the physical unit is dimensionless, and that conversion is turned off
when the resulting logarithmic unit (say, mag**2) is incompatible."""
lq = u.Magnitude(np.arange(1., 4.)*u.Jy)
if power == 0:
assert np.all(lq ** power == 1.)
elif power == 1:
assert np.all(lq ** power == lq)
else:
with pytest.raises(u.UnitsError):
lq ** power
# with dimensionless, it works, but falls back to normal quantity
# (except for power=1)
lq2 = u.Magnitude(np.arange(10.))
t = lq2**power
if power == 0:
assert t.unit is u.dimensionless_unscaled
assert np.all(t.value == 1.)
elif power == 1:
assert np.all(t == lq2)
else:
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit ** power
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(u.dimensionless_unscaled)
def test_error_on_lq_as_power(self):
lq = u.Magnitude(np.arange(1., 4.)*u.Jy)
with pytest.raises(TypeError):
lq ** lq
@pytest.mark.parametrize('other', pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
q = 1.23 * other
with pytest.raises(u.UnitsError):
lq + q
with pytest.raises(u.UnitsError):
lq - q
with pytest.raises(u.UnitsError):
q - lq
@pytest.mark.parametrize(
'other', (1.23 * u.mag, 2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag)))
def test_addition_subtraction(self, other):
"""Check that addition/subtraction with quantities with magnitude or
MagUnit units works, and that it changes the physical units
appropriately."""
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
other_physical = other.to(getattr(other.unit, 'physical_unit',
u.dimensionless_unscaled),
equivalencies=u.logarithmic())
lq_sf = lq + other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_sr = other + lq
assert_allclose(lq_sr.physical, lq.physical * other_physical)
lq_df = lq - other
assert_allclose(lq_df.physical, lq.physical / other_physical)
lq_dr = other - lq
assert_allclose(lq_dr.physical, other_physical / lq.physical)
@pytest.mark.parametrize('other', pu_sample)
def test_inplace_addition_subtraction_unit_checks(self, other):
lu1 = u.mag(u.Jy)
lq1 = u.Magnitude(np.arange(1., 10.), lu1)
with pytest.raises(u.UnitsError):
lq1 += other
assert np.all(lq1.value == np.arange(1., 10.))
assert lq1.unit == lu1
with pytest.raises(u.UnitsError):
lq1 -= other
assert np.all(lq1.value == np.arange(1., 10.))
assert lq1.unit == lu1
@pytest.mark.parametrize(
'other', (1.23 * u.mag, 2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag)))
def test_inplace_addition_subtraction(self, other):
"""Check that inplace addition/subtraction with quantities with
magnitude or MagUnit units works, and that it changes the physical
units appropriately."""
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
other_physical = other.to(getattr(other.unit, 'physical_unit',
u.dimensionless_unscaled),
equivalencies=u.logarithmic())
lq_sf = lq.copy()
lq_sf += other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_df = lq.copy()
lq_df -= other
assert_allclose(lq_df.physical, lq.physical / other_physical)
def test_complicated_addition_subtraction(self):
"""For fun, a more complicated example of addition and subtraction."""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
DMmag = u.mag(dm0)
m_st = 10. * u.STmag
dm = 5. * DMmag
M_st = m_st - dm
assert M_st.unit.is_equivalent(u.erg/u.s/u.AA)
assert np.abs(M_st.physical /
(m_st.physical*4.*np.pi*(100.*u.pc)**2) - 1.) < 1.e-15
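        # Sanity check of the factor: dm = 5 mag corresponds, via
        # dm = 5 log10(d / 10 pc), to d = 100 pc, so the absolute flux is the
        # apparent flux times 4 pi (100 pc)**2 -- the quantity compared above.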
class TestLogQuantityComparisons:
def test_comparison_to_non_quantities_fails(self):
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
with pytest.raises(TypeError):
lq > 'a'
assert not (lq == 'a')
assert lq != 'a'
def test_comparison(self):
lq1 = u.Magnitude(np.arange(1., 4.)*u.Jy)
lq2 = u.Magnitude(2.*u.Jy)
assert np.all((lq1 > lq2) == np.array([True, False, False]))
assert np.all((lq1 == lq2) == np.array([False, True, False]))
lq3 = u.Dex(2.*u.Jy)
assert np.all((lq1 > lq3) == np.array([True, False, False]))
assert np.all((lq1 == lq3) == np.array([False, True, False]))
lq4 = u.Magnitude(2.*u.m)
assert not (lq1 == lq4)
assert lq1 != lq4
with pytest.raises(u.UnitsError):
lq1 < lq4
q5 = 1.5 * u.Jy
assert np.all((lq1 > q5) == np.array([True, False, False]))
assert np.all((q5 < lq1) == np.array([True, False, False]))
with pytest.raises(u.UnitsError):
lq1 >= 2.*u.m
with pytest.raises(u.UnitsError):
lq1 <= lq1.value * u.mag
# For physically dimensionless, we can compare with the function unit.
lq6 = u.Magnitude(np.arange(1., 4.))
fv6 = lq6.value * u.mag
assert np.all(lq6 == fv6)
# but not some arbitrary unit, of course.
with pytest.raises(u.UnitsError):
lq6 < 2.*u.m
class TestLogQuantityMethods:
def setup(self):
self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy)
self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag()
self.mags = (self.mJy, self.m1)
@pytest.mark.parametrize('method', ('mean', 'min', 'max', 'round', 'trace',
'std', 'var', 'ptp', 'diff', 'ediff1d'))
def test_always_ok(self, method):
for mag in self.mags:
res = getattr(mag, method)()
assert np.all(res.value ==
getattr(mag._function_view, method)().value)
if method in ('std', 'ptp', 'diff', 'ediff1d'):
assert res.unit == u.mag()
elif method == 'var':
assert res.unit == u.mag**2
else:
assert res.unit == mag.unit
def test_clip(self):
for mag in self.mags:
assert np.all(mag.clip(2. * mag.unit, 4. * mag.unit).value ==
mag.value.clip(2., 4.))
@pytest.mark.parametrize('method', ('sum', 'cumsum', 'nansum'))
def test_only_ok_if_dimensionless(self, method):
res = getattr(self.m1, method)()
assert np.all(res.value ==
getattr(self.m1._function_view, method)().value)
assert res.unit == self.m1.unit
with pytest.raises(TypeError):
getattr(self.mJy, method)()
def test_dot(self):
assert np.all(self.m1.dot(self.m1).value ==
self.m1.value.dot(self.m1.value))
@pytest.mark.parametrize('method', ('prod', 'cumprod'))
def test_never_ok(self, method):
with pytest.raises(ValueError):
getattr(self.mJy, method)()
with pytest.raises(ValueError):
getattr(self.m1, method)()
class TestLogQuantityUfuncs:
"""Spot checks on ufuncs."""
def setup(self):
self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy)
self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag()
self.mags = (self.mJy, self.m1)
def test_power(self):
assert np.all(np.power(self.mJy, 0.) == 1.)
assert np.all(np.power(self.m1, 1.) == self.m1)
assert np.all(np.power(self.mJy, 1.) == self.mJy)
assert np.all(np.power(self.m1, 2.) == self.m1 ** 2)
with pytest.raises(u.UnitsError):
np.power(self.mJy, 2.)
def test_not_implemented_with_physical_unit(self):
with pytest.raises(u.UnitsError):
np.square(self.mJy)
assert np.all(np.square(self.m1) == self.m1 ** 2)
# The purpose of these tests is to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import warnings
from collections import namedtuple
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ... import units as u
from ...tests.helper import raises
from ...utils.compat import NUMPY_LT_1_13
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
testcase = namedtuple('testcase', ['f', 'q_in', 'q_out'])
testexc = namedtuple('testexc', ['f', 'q_in', 'exc', 'msg'])
testwarn = namedtuple('testwarn', ['f', 'q_in', 'wfilter'])
@pytest.mark.skip
def test_testcase(tc):
results = tc.f(*tc.q_in)
    # Careful with the following line: it would break on a function returning
    # a single tuple (as opposed to a tuple of return values).
results = (results, ) if type(results) != tuple else results
for result, expected in zip(results, tc.q_out):
assert result.unit == expected.unit
assert_allclose(result.value, expected.value, atol=1.E-15)
@pytest.mark.skip
def test_testexc(te):
with pytest.raises(te.exc) as exc:
te.f(*te.q_in)
if te.msg is not None:
assert te.msg in exc.value.args[0]
@pytest.mark.skip
def test_testwarn(tw):
with warnings.catch_warnings():
warnings.filterwarnings(tw.wfilter)
tw.f(*tw.q_in)
class TestUfuncCoverage:
"""Test that we cover all ufunc's"""
# Ignore possible scipy ufuncs; for scipy in particular, we have support
# for some, but for others it still has to be decided whether we can
# support them or not.
@pytest.mark.skipif(HAS_SCIPY,
reason='scipy.special coverage is incomplete')
def test_coverage(self):
all_extern_ufuncs = set([])
all_np_ufuncs = set([ufunc for ufunc in np.core.umath.__dict__.values()
if type(ufunc) == np.ufunc])
all_extern_ufuncs |= all_np_ufuncs
from .. import quantity_helper as qh
all_q_ufuncs = (qh.UNSUPPORTED_UFUNCS |
set(qh.UFUNC_HELPERS.keys()))
assert all_extern_ufuncs - all_q_ufuncs == set([])
assert all_q_ufuncs - all_extern_ufuncs == set([])
class TestQuantityTrigonometricFuncs:
"""
Test trigonometric functions
"""
@pytest.mark.parametrize('tc', (
testcase(
f=np.sin,
q_in=(30. * u.degree, ),
q_out=(0.5*u.dimensionless_unscaled, )
),
testcase(
f=np.sin,
q_in=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
q_out=(np.array([0., 1. / np.sqrt(2.), 1.]) * u.one, )
),
testcase(
f=np.arcsin,
q_in=(np.sin(30. * u.degree), ),
q_out=(np.radians(30.) * u.radian, )
),
testcase(
f=np.arcsin,
q_in=(np.sin(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian), ),
q_out=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, )
),
testcase(
f=np.cos,
q_in=(np.pi / 3. * u.radian, ),
q_out=(0.5 * u.dimensionless_unscaled, )
),
testcase(
f=np.cos,
q_in=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
q_out=(np.array([1., 1. / np.sqrt(2.), 0.]) * u.one, )
),
testcase(
f=np.arccos,
q_in=(np.cos(np.pi / 3. * u.radian), ),
q_out=(np.pi / 3. * u.radian, )
),
testcase(
f=np.arccos,
q_in=(np.cos(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian), ),
q_out=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
),
testcase(
f=np.tan,
q_in=(np.pi / 3. * u.radian, ),
q_out=(np.sqrt(3.) * u.dimensionless_unscaled, )
),
testcase(
f=np.tan,
q_in=(np.array([0., 45., 135., 180.]) * u.degree, ),
q_out=(np.array([0., 1., -1., 0.]) * u.dimensionless_unscaled, )
),
testcase(
f=np.arctan,
q_in=(np.tan(np.pi / 3. * u.radian), ),
q_out=(np.pi / 3. * u.radian, )
),
testcase(
f=np.arctan,
q_in=(np.tan(np.array([10., 30., 70., 80.]) * u.degree), ),
q_out=(np.radians(np.array([10., 30., 70., 80.]) * u.degree), )
),
testcase(
f=np.arctan2,
q_in=(np.array([10., 30., 70., 80.]) * u.m, 2.0 * u.km),
q_out=(np.arctan2(np.array([10., 30., 70., 80.]),
2000.) * u.radian, )
),
testcase(
f=np.arctan2,
q_in=((np.array([10., 80.]) * u.m / (2.0 * u.km)).to(u.one), 1.),
q_out=(np.arctan2(np.array([10., 80.]) / 2000., 1.) * u.radian, )
),
testcase(
f=np.deg2rad,
q_in=(180. * u.degree, ),
q_out=(np.pi * u.radian, )
),
testcase(
f=np.radians,
q_in=(180. * u.degree, ),
q_out=(np.pi * u.radian, )
),
testcase(
f=np.deg2rad,
q_in=(3. * u.radian, ),
q_out=(3. * u.radian, )
),
testcase(
f=np.radians,
q_in=(3. * u.radian, ),
q_out=(3. * u.radian, )
),
testcase(
f=np.rad2deg,
q_in=(60. * u.degree, ),
q_out=(60. * u.degree, )
),
testcase(
f=np.degrees,
q_in=(60. * u.degree, ),
q_out=(60. * u.degree, )
),
testcase(
f=np.rad2deg,
q_in=(np.pi * u.radian, ),
q_out=(180. * u.degree, )
),
testcase(
f=np.degrees,
q_in=(np.pi * u.radian, ),
q_out=(180. * u.degree, )
)
))
def test_testcases(self, tc):
return test_testcase(tc)
@pytest.mark.parametrize('te', (
testexc(
f=np.deg2rad,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.radians,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.rad2deg,
q_in=(3. * u.m),
exc=TypeError,
msg=None
),
testexc(
f=np.degrees,
q_in=(3. * u.m),
exc=TypeError,
msg=None
),
testexc(
f=np.sin,
q_in=(3. * u.m, ),
exc=TypeError,
msg="Can only apply 'sin' function to quantities with angle units"
),
testexc(
f=np.arcsin,
q_in=(3. * u.m, ),
exc=TypeError,
msg="Can only apply 'arcsin' function to dimensionless quantities"
),
testexc(
f=np.cos,
q_in=(3. * u.s, ),
exc=TypeError,
msg="Can only apply 'cos' function to quantities with angle units"
),
testexc(
f=np.arccos,
q_in=(3. * u.s, ),
exc=TypeError,
msg="Can only apply 'arccos' function to dimensionless quantities"
),
testexc(
f=np.tan,
q_in=(np.array([1, 2, 3]) * u.N, ),
exc=TypeError,
msg="Can only apply 'tan' function to quantities with angle units"
),
testexc(
f=np.arctan,
q_in=(np.array([1, 2, 3]) * u.N, ),
exc=TypeError,
msg="Can only apply 'arctan' function to dimensionless quantities"
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1. * u.s),
exc=u.UnitsError,
msg="compatible dimensions"
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.),
exc=u.UnitsError,
msg="dimensionless quantities when other arg"
)
))
def test_testexcs(self, te):
return test_testexc(te)
@pytest.mark.parametrize('tw', (
testwarn(
f=np.arcsin,
q_in=(27. * u.pc / (15 * u.kpc), ),
wfilter='error'
),
))
def test_testwarns(self, tw):
return test_testwarn(tw)
class TestQuantityMathFuncs:
"""
Test other mathematical functions
"""
def test_multiply_scalar(self):
assert np.multiply(4. * u.m, 2. / u.s) == 8. * u.m / u.s
assert np.multiply(4. * u.m, 2.) == 8. * u.m
assert np.multiply(4., 2. / u.s) == 8. / u.s
def test_multiply_array(self):
assert np.all(np.multiply(np.arange(3.) * u.m, 2. / u.s) ==
np.arange(0, 6., 2.) * u.m / u.s)
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_scalar(self, function):
assert function(4. * u.m, 2. * u.s) == function(4., 2.) * u.m / u.s
assert function(4. * u.m, 2.) == function(4., 2.) * u.m
assert function(4., 2. * u.s) == function(4., 2.) / u.s
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_array(self, function):
assert np.all(function(np.arange(3.) * u.m, 2. * u.s) ==
function(np.arange(3.), 2.) * u.m / u.s)
def test_floor_divide_remainder_and_divmod(self):
inch = u.Unit(0.0254 * u.m)
dividend = np.array([1., 2., 3.]) * u.m
divisor = np.array([3., 4., 5.]) * inch
quotient = dividend // divisor
remainder = dividend % divisor
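        # Worked example for the first element: 1 m // (3 * 0.0254 m)
        # = floor(13.12...) = 13, with remainder 1 - 13 * 0.0762 = 0.0094 m.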
assert_allclose(quotient.value, [13., 19., 23.])
assert quotient.unit == u.dimensionless_unscaled
assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
assert remainder.unit == dividend.unit
quotient2 = np.floor_divide(dividend, divisor)
remainder2 = np.remainder(dividend, divisor)
assert np.all(quotient2 == quotient)
assert np.all(remainder2 == remainder)
quotient3, remainder3 = divmod(dividend, divisor)
assert np.all(quotient3 == quotient)
assert np.all(remainder3 == remainder)
with pytest.raises(TypeError):
divmod(dividend, u.km)
with pytest.raises(TypeError):
dividend // u.km
with pytest.raises(TypeError):
dividend % u.km
if hasattr(np, 'divmod'): # not NUMPY_LT_1_13
quotient4, remainder4 = np.divmod(dividend, divisor)
assert np.all(quotient4 == quotient)
assert np.all(remainder4 == remainder)
with pytest.raises(TypeError):
np.divmod(dividend, u.km)
def test_sqrt_scalar(self):
assert np.sqrt(4. * u.m) == 2. * u.m ** 0.5
def test_sqrt_array(self):
assert np.all(np.sqrt(np.array([1., 4., 9.]) * u.m)
== np.array([1., 2., 3.]) * u.m ** 0.5)
def test_square_scalar(self):
assert np.square(4. * u.m) == 16. * u.m ** 2
def test_square_array(self):
assert np.all(np.square(np.array([1., 2., 3.]) * u.m)
== np.array([1., 4., 9.]) * u.m ** 2)
def test_reciprocal_scalar(self):
assert np.reciprocal(4. * u.m) == 0.25 / u.m
def test_reciprocal_array(self):
assert np.all(np.reciprocal(np.array([1., 2., 4.]) * u.m)
== np.array([1., 0.5, 0.25]) / u.m)
# heaviside only introduced in numpy 1.13
@pytest.mark.skipif("not hasattr(np, 'heaviside')")
def test_heaviside_scalar(self):
assert np.heaviside(0. * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
assert np.heaviside(0. * u.s,
25 * u.percent) == 0.25 * u.dimensionless_unscaled
assert np.heaviside(2. * u.J, 0.25) == 1. * u.dimensionless_unscaled
@pytest.mark.skipif("not hasattr(np, 'heaviside')")
def test_heaviside_array(self):
values = np.array([-1., 0., 0., +1.])
halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
assert np.all(np.heaviside(values * u.m,
halfway * u.dimensionless_unscaled) ==
[0, 0.25, 0.75, +1.] * u.dimensionless_unscaled)
@pytest.mark.parametrize('function', (np.cbrt, ))
def test_cbrt_scalar(self, function):
assert function(8. * u.m**3) == 2. * u.m
@pytest.mark.parametrize('function', (np.cbrt, ))
def test_cbrt_array(self, function):
        # Calculate cbrt on both sides since on Windows the cube root of 64
        # does not exactly equal 4.  See #4388.
values = np.array([1., 8., 64.])
assert np.all(function(values * u.m**3) ==
function(values) * u.m)
def test_power_scalar(self):
assert np.power(4. * u.m, 2.) == 16. * u.m ** 2
assert np.power(4., 200. * u.cm / u.m) == \
u.Quantity(16., u.dimensionless_unscaled)
# regression check on #1696
assert np.power(4. * u.m, 0.) == 1. * u.dimensionless_unscaled
def test_power_array(self):
assert np.all(np.power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
# float_power only introduced in numpy 1.12
@pytest.mark.skipif("not hasattr(np, 'float_power')")
def test_float_power_array(self):
assert np.all(np.float_power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.float_power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
@raises(ValueError)
def test_power_array_array(self):
np.power(4. * u.m, [2., 4.])
@raises(ValueError)
def test_power_array_array2(self):
np.power([2., 4.] * u.m, [2., 4.])
def test_power_array_array3(self):
# Identical unit fractions are converted automatically to dimensionless
# and should be allowed as base for np.power: #4764
q = [2., 4.] * u.m / u.m
powers = [2., 4.]
res = np.power(q, powers)
assert np.all(res.value == q.value ** powers)
assert res.unit == u.dimensionless_unscaled
# The same holds for unit fractions that are scaled dimensionless.
q2 = [2., 4.] * u.m / u.cm
# Test also against different types of exponent
for cls in (list, tuple, np.array, np.ma.array, u.Quantity):
res2 = np.power(q2, cls(powers))
assert np.all(res2.value == q2.to_value(1) ** powers)
assert res2.unit == u.dimensionless_unscaled
# Though for single powers, we keep the composite unit.
res3 = q2 ** 2
assert np.all(res3.value == q2.value ** 2)
assert res3.unit == q2.unit ** 2
assert np.all(res3 == q2 ** [2, 2])
def test_power_invalid(self):
with pytest.raises(TypeError) as exc:
np.power(3., 4. * u.m)
assert "raise something to a dimensionless" in exc.value.args[0]
def test_copysign_scalar(self):
assert np.copysign(3 * u.m, 1.) == 3. * u.m
assert np.copysign(3 * u.m, 1. * u.s) == 3. * u.m
assert np.copysign(3 * u.m, -1.) == -3. * u.m
assert np.copysign(3 * u.m, -1. * u.s) == -3. * u.m
def test_copysign_array(self):
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1.) ==
-np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1. * u.m) ==
-np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s,
np.array([-2., 2., -4.]) * u.m) ==
np.array([-1., 2., -3.]) * u.s)
q = np.copysign(np.array([1., 2., 3.]), -3 * u.m)
assert np.all(q == np.array([-1., -2., -3.]))
assert not isinstance(q, u.Quantity)
def test_ldexp_scalar(self):
assert np.ldexp(4. * u.m, 2) == 16. * u.m
def test_ldexp_array(self):
assert np.all(np.ldexp(np.array([1., 2., 3.]) * u.m, [3, 2, 1])
== np.array([8., 8., 6.]) * u.m)
def test_ldexp_invalid(self):
with pytest.raises(TypeError):
np.ldexp(3. * u.m, 4.)
with pytest.raises(TypeError):
np.ldexp(3., u.Quantity(4, u.m, dtype=int))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_scalar(self, function):
q = function(3. * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(0.5)
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value
== function(np.array([1. / 3., 1. / 2., 1.])))
# should also work on quantities that can be made dimensionless
q2 = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value,
function(np.array([100. / 3., 100. / 2., 100.])))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_invalid_units(self, function):
# Can't use exp() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
function(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
def test_modf_scalar(self):
q = np.modf(9. * u.m / (600. * u.cm))
assert q == (0.5 * u.dimensionless_unscaled,
1. * u.dimensionless_unscaled)
def test_modf_array(self):
v = np.arange(10.) * u.m / (500. * u.cm)
q = np.modf(v)
n = np.modf(v.to_value(u.dimensionless_unscaled))
assert q[0].unit == u.dimensionless_unscaled
assert q[1].unit == u.dimensionless_unscaled
assert all(q[0].value == n[0])
assert all(q[1].value == n[1])
def test_frexp_scalar(self):
q = np.frexp(3. * u.m / (6. * u.m))
assert q == (np.array(0.5), np.array(0.0))
def test_frexp_array(self):
q = np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert all((_q0, _q1) == np.frexp(_d) for _q0, _q1, _d
in zip(q[0], q[1], [1. / 3., 1. / 2., 1.]))
def test_frexp_invalid_units(self):
# Can't use prod() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
np.frexp(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
# also does not work on quantities that can be made dimensionless
with pytest.raises(TypeError) as exc:
np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm), 1.)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
function(np.array([100. / 3., 100. / 2., 100.]), 1.))
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_invalid_units(self, function):
with pytest.raises(TypeError) as exc:
function(1. * u.km / u.s, 3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
class TestInvariantUfuncs:
# np.positive was only added in numpy 1.13.
    @pytest.mark.parametrize(('ufunc'), [np.absolute, np.fabs,
                                         np.conj, np.conjugate,
                                         np.negative, np.spacing, np.rint,
                                         np.floor, np.ceil] +
                                        ([np.positive]
                                         if hasattr(np, 'positive') else []))
def test_invariant_scalar(self, ufunc):
q_i = 4.7 * u.m
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert q_o.value == ufunc(q_i.value)
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.conjugate,
np.negative, np.rint,
np.floor, np.ceil])
def test_invariant_array(self, ufunc):
q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert np.all(q_o.value == ufunc(q_i.value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_scalar(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.km
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_array(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.us
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_one_arbitrary(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
arbitrary_unit_value = np.array([0.])
q_o = ufunc(q_i1, arbitrary_unit_value)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary_unit_value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
class TestComparisonUfuncs:
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_valid_units(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.Ms
q_o = ufunc(q_i1, q_i2)
assert not isinstance(q_o, u.Quantity)
assert q_o.dtype == bool
assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
q_o2 = ufunc(q_i1 / q_i2, 2.)
assert not isinstance(q_o2, u.Quantity)
assert q_o2.dtype == bool
assert np.all(q_o2 == ufunc((q_i1 / q_i2)
.to_value(u.dimensionless_unscaled), 2.))
# comparison with 0., inf, nan is OK even for dimensional quantities
for arbitrary_unit_value in (0., np.inf, np.nan):
ufunc(q_i1, arbitrary_unit_value)
ufunc(q_i1, arbitrary_unit_value*np.ones(len(q_i1)))
# and just for completeness
ufunc(q_i1, np.array([0., np.inf, np.nan]))
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
class TestInplaceUfuncs:
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace(self, value):
# without scaling
s = value * u.rad
check = s
np.sin(s, out=s)
assert check is s
assert check.unit == u.dimensionless_unscaled
# with scaling
s2 = (value * u.rad).to(u.deg)
check2 = s2
np.sin(s2, out=s2)
assert check2 is s2
assert check2.unit == u.dimensionless_unscaled
assert_allclose(s.value, s2.value)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace_2(self, value):
"""Check inplace works with non-quantity input and quantity output"""
s = value * u.m
check = s
np.absolute(value, out=s)
assert check is s
assert np.all(check.value == np.absolute(value))
assert check.unit is u.dimensionless_unscaled
np.sqrt(value, out=s)
assert check is s
assert np.all(check.value == np.sqrt(value))
assert check.unit is u.dimensionless_unscaled
np.exp(value, out=s)
assert check is s
assert np.all(check.value == np.exp(value))
assert check.unit is u.dimensionless_unscaled
np.arcsin(value/10., out=s)
assert check is s
assert np.all(check.value == np.arcsin(value/10.))
assert check.unit is u.radian
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_two_output_ufunc_inplace(self, value):
v = 100. * value * u.cm / u.m
v_copy = v.copy()
tmp = v.copy()
check = v
np.modf(v, tmp, v) # cannot use out1,out2 keywords with numpy 1.7
assert check is v
assert check.unit == u.dimensionless_unscaled
v2 = v_copy.to(u.dimensionless_unscaled)
check2 = v2
np.modf(v2, tmp, v2)
assert check2 is v2
assert check2.unit == u.dimensionless_unscaled
# can also replace in last position if no scaling is needed
v3 = v_copy.to(u.dimensionless_unscaled)
check3 = v3
np.modf(v3, v3, tmp)
assert check3 is v3
assert check3.unit == u.dimensionless_unscaled
# in np<1.13, without __array_ufunc__, one cannot replace input with
# first output when scaling
v4 = v_copy.copy()
if NUMPY_LT_1_13:
with pytest.raises(TypeError):
np.modf(v4, v4, tmp)
else:
check4 = v4
np.modf(v4, v4, tmp)
assert check4 is v4
assert check4.unit == u.dimensionless_unscaled
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_ufunc_inplace_1(self, value):
s = value * u.cycle
check = s
s /= 2.
assert check is s
assert np.all(check.value == value / 2.)
s /= u.s
assert check is s
assert check.unit == u.cycle / u.s
s *= 2. * u.s
assert check is s
assert np.all(check == value * u.cycle)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_ufunc_inplace_2(self, value):
s = value * u.cycle
check = s
np.arctan2(s, s, out=s)
assert check is s
assert check.unit == u.radian
with pytest.raises(u.UnitsError):
s += 1. * u.m
assert check is s
assert check.unit == u.radian
np.arctan2(1. * u.deg, s, out=s)
assert check is s
assert check.unit == u.radian
np.add(1. * u.deg, s, out=s)
assert check is s
assert check.unit == u.deg
np.multiply(2. / u.s, s, out=s)
assert check is s
assert check.unit == u.deg / u.s
def test_two_argument_ufunc_inplace_3(self):
s = np.array([1., 2., 3.]) * u.dimensionless_unscaled
np.add(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
assert np.all(s.value == np.array([3., 6., 9.]))
assert s.unit is u.dimensionless_unscaled
np.arctan2(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
assert_allclose(s.value, np.arctan2(1., 2.))
assert s.unit is u.radian
@pytest.mark.skipif(NUMPY_LT_1_13, reason="numpy >=1.13 required.")
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_two_output_ufunc_inplace(self, value):
v = value * u.m
divisor = 70.*u.cm
v1 = v.copy()
tmp = v.copy()
check = np.divmod(v1, divisor, out=(tmp, v1))
assert check[0] is tmp and check[1] is v1
assert tmp.unit == u.dimensionless_unscaled
assert v1.unit == v.unit
v2 = v.copy()
check2 = np.divmod(v2, divisor, out=(v2, tmp))
assert check2[0] is v2 and check2[1] is tmp
assert v2.unit == u.dimensionless_unscaled
assert tmp.unit == v.unit
v3a = v.copy()
v3b = v.copy()
check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
assert check3[0] is v3a and check3[1] is v3b
assert v3a.unit == u.dimensionless_unscaled
assert v3b.unit == v.unit
def test_ufunc_inplace_non_contiguous_data(self):
# ensure inplace works also for non-contiguous data (closes #1834)
s = np.arange(10.) * u.m
s_copy = s.copy()
s2 = s[::2]
s2 += 1. * u.cm
assert np.all(s[::2] > s_copy[::2])
assert np.all(s[1::2] == s_copy[1::2])
def test_ufunc_inplace_non_standard_dtype(self):
"""Check that inplace operations check properly for casting.
        The first two tests check that float32 is preserved (closes #3976).
"""
a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a1 *= np.float32(10)
assert a1.unit is u.m
assert a1.dtype == np.float32
a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a2 += (20.*u.km)
assert a2.unit is u.m
assert a2.dtype == np.float32
# For integer, in-place only works if no conversion is done.
a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
a3 += u.Quantity(10, u.m, dtype=np.int64)
assert a3.unit is u.m
assert a3.dtype == np.int32
a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
with pytest.raises(TypeError):
a4 += u.Quantity(10, u.mm, dtype=np.int64)
@pytest.mark.xfail("NUMPY_LT_1_13")
class TestUfuncAt:
"""Test that 'at' method for ufuncs (calculates in-place at given indices)
For Quantities, since calculations are in-place, it makes sense only
if the result is still a quantity, and if the unit does not have to change
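
    A minimal sketch of the behaviour under test (assumes numpy >= 1.13;
    shown as a skipped doctest since output formatting varies)::

        >>> q = np.arange(3.) * u.m
        >>> np.add.at(q, [0, 1], 1. * u.km)  # doctest: +SKIP
        >>> q  # doctest: +SKIP
        <Quantity [1000., 1001., 2.] m>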
"""
def test_one_argument_ufunc_at(self):
q = np.arange(10.) * u.m
i = np.array([1, 2])
qv = q.value.copy()
np.negative.at(q, i)
np.negative.at(qv, i)
assert np.all(q.value == qv)
assert q.unit is u.m
# cannot change from quantity to bool array
with pytest.raises(TypeError):
np.isfinite.at(q, i)
# for selective in-place, cannot change the unit
with pytest.raises(u.UnitsError):
np.square.at(q, i)
# except if the unit does not change (i.e., dimensionless)
d = np.arange(10.) * u.dimensionless_unscaled
dv = d.value.copy()
np.square.at(d, i)
np.square.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
d = np.arange(10.) * u.dimensionless_unscaled
dv = d.value.copy()
np.log.at(d, i)
np.log.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
# also for sine it doesn't work, even if given an angle
a = np.arange(10.) * u.radian
with pytest.raises(u.UnitsError):
np.sin.at(a, i)
# except, for consistency, if we have made radian equivalent to
# dimensionless (though hopefully it will never be needed)
av = a.value.copy()
with u.add_enabled_equivalencies(u.dimensionless_angles()):
np.sin.at(a, i)
np.sin.at(av, i)
assert_allclose(a.value, av)
# but we won't do double conversion
ad = np.arange(10.) * u.degree
with pytest.raises(u.UnitsError):
np.sin.at(ad, i)
def test_two_argument_ufunc_at(self):
s = np.arange(10.) * u.m
i = np.array([1, 2])
check = s.value.copy()
np.add.at(s, i, 1.*u.km)
np.add.at(check, i, 1000.)
assert np.all(s.value == check)
assert s.unit is u.m
with pytest.raises(u.UnitsError):
np.add.at(s, i, 1.*u.s)
# also raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.at(s, i, 1*u.s)
# but be fine if it does not
s = np.arange(10.) * u.m
check = s.value.copy()
np.multiply.at(s, i, 2.*u.dimensionless_unscaled)
np.multiply.at(check, i, 2)
assert np.all(s.value == check)
s = np.arange(10.) * u.m
np.multiply.at(s, i, 2.)
assert np.all(s.value == check)
# of course cannot change class of data either
with pytest.raises(TypeError):
np.greater.at(s, i, 1.*u.km)
class TestUfuncReduceReduceatAccumulate:
"""Test 'reduce', 'reduceat' and 'accumulate' methods for ufuncs
For Quantities, it makes sense only if the unit does not have to change
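
    For instance, summing lengths keeps the unit (a sketch, assuming
    numpy >= 1.13; skipped as a doctest)::

        >>> np.add.reduce(np.arange(3.) * u.m)  # doctest: +SKIP
        <Quantity 3. m>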
"""
def test_one_argument_ufunc_reduce_accumulate(self):
# one argument cannot be used
s = np.arange(10.) * u.radian
i = np.array([0, 5, 1, 6])
with pytest.raises(ValueError):
np.sin.reduce(s)
with pytest.raises(ValueError):
np.sin.accumulate(s)
with pytest.raises(ValueError):
np.sin.reduceat(s, i)
@pytest.mark.xfail("NUMPY_LT_1_13")
def test_two_argument_ufunc_reduce_accumulate(self):
s = np.arange(10.) * u.m
i = np.array([0, 5, 1, 6])
check = s.value.copy()
s_add_reduce = np.add.reduce(s)
check_add_reduce = np.add.reduce(check)
assert s_add_reduce.value == check_add_reduce
assert s_add_reduce.unit is u.m
s_add_accumulate = np.add.accumulate(s)
check_add_accumulate = np.add.accumulate(check)
assert np.all(s_add_accumulate.value == check_add_accumulate)
assert s_add_accumulate.unit is u.m
s_add_reduceat = np.add.reduceat(s, i)
check_add_reduceat = np.add.reduceat(check, i)
assert np.all(s_add_reduceat.value == check_add_reduceat)
assert s_add_reduceat.unit is u.m
# reduce(at) or accumulate on comparisons makes no sense,
# as intermediate result is not even a Quantity
with pytest.raises(TypeError):
np.greater.reduce(s)
with pytest.raises(TypeError):
np.greater.accumulate(s)
with pytest.raises(TypeError):
np.greater.reduceat(s, i)
# raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.reduce(s)
with pytest.raises(u.UnitsError):
np.multiply.accumulate(s)
with pytest.raises(u.UnitsError):
np.multiply.reduceat(s, i)
# but be fine if it does not
s = np.arange(10.) * u.dimensionless_unscaled
check = s.value.copy()
s_multiply_reduce = np.multiply.reduce(s)
check_multiply_reduce = np.multiply.reduce(check)
assert s_multiply_reduce.value == check_multiply_reduce
assert s_multiply_reduce.unit is u.dimensionless_unscaled
s_multiply_accumulate = np.multiply.accumulate(s)
check_multiply_accumulate = np.multiply.accumulate(check)
assert np.all(s_multiply_accumulate.value == check_multiply_accumulate)
assert s_multiply_accumulate.unit is u.dimensionless_unscaled
s_multiply_reduceat = np.multiply.reduceat(s, i)
check_multiply_reduceat = np.multiply.reduceat(check, i)
assert np.all(s_multiply_reduceat.value == check_multiply_reduceat)
assert s_multiply_reduceat.unit is u.dimensionless_unscaled
class TestUfuncOuter:
"""Test 'outer' methods for ufuncs
Just a few spot checks, since it uses the same code as the regular
ufunc call
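
    For instance (a sketch, assuming numpy >= 1.13; skipped as a doctest)::

        >>> np.multiply.outer(np.arange(2.) * u.m, np.arange(3.) * u.s)  # doctest: +SKIP
        <Quantity [[0., 0., 0.],
                   [0., 1., 2.]] m s>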
"""
def test_one_argument_ufunc_outer(self):
# one argument cannot be used
s = np.arange(10.) * u.radian
with pytest.raises(ValueError):
np.sin.outer(s)
@pytest.mark.xfail("NUMPY_LT_1_13")
def test_two_argument_ufunc_outer(self):
s1 = np.arange(10.) * u.m
s2 = np.arange(2.) * u.s
check1 = s1.value
check2 = s2.value
s12_multiply_outer = np.multiply.outer(s1, s2)
check12_multiply_outer = np.multiply.outer(check1, check2)
assert np.all(s12_multiply_outer.value == check12_multiply_outer)
assert s12_multiply_outer.unit == s1.unit * s2.unit
# raise UnitsError if appropriate
with pytest.raises(u.UnitsError):
np.add.outer(s1, s2)
# but be fine if it does not
s3 = np.arange(2.) * s1.unit
check3 = s3.value
s13_add_outer = np.add.outer(s1, s3)
check13_add_outer = np.add.outer(check1, check3)
assert np.all(s13_add_outer.value == check13_add_outer)
assert s13_add_outer.unit is s1.unit
s13_greater_outer = np.greater.outer(s1, s3)
check13_greater_outer = np.greater.outer(check1, check3)
assert type(s13_greater_outer) is np.ndarray
assert np.all(s13_greater_outer == check13_greater_outer)
if HAS_SCIPY:
from scipy import special as sps
class TestScipySpecialUfuncs:
erf_like_ufuncs = (
sps.erf, sps.gamma, sps.loggamma, sps.gammasgn, sps.psi,
sps.rgamma, sps.erfc, sps.erfcx, sps.erfi, sps.wofz, sps.dawsn,
sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2, sps.exp10)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_scalar(self, function):
TestQuantityMathFuncs.test_exp_scalar(None, function)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_array(self, function):
TestQuantityMathFuncs.test_exp_array(None, function)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_invalid_units(self, function):
TestQuantityMathFuncs.test_exp_invalid_units(None, function)
@pytest.mark.parametrize('function', (sps.cbrt, ))
def test_cbrt_scalar(self, function):
TestQuantityMathFuncs.test_cbrt_scalar(None, function)
@pytest.mark.parametrize('function', (sps.cbrt, ))
def test_cbrt_array(self, function):
TestQuantityMathFuncs.test_cbrt_array(None, function)
@pytest.mark.parametrize('function', (sps.radian, ))
def test_radian(self, function):
q1 = function(180. * u.degree, 0. * u.arcmin, 0. * u.arcsec)
assert_allclose(q1.value, np.pi)
assert q1.unit == u.radian
q2 = function(0. * u.degree, 30. * u.arcmin, 0. * u.arcsec)
assert_allclose(q2.value, (30. * u.arcmin).to(u.radian).value)
assert q2.unit == u.radian
q3 = function(0. * u.degree, 0. * u.arcmin, 30. * u.arcsec)
assert_allclose(q3.value, (30. * u.arcsec).to(u.radian).value)
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q4 = function(3. * u.radian, 0. * u.arcmin, 0. * u.arcsec)
assert_allclose(q4.value, 3.)
assert q4.unit == u.radian
with pytest.raises(TypeError):
function(3. * u.m, 2. * u.s, 1. * u.kg)
jv_like_ufuncs = (
sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
sps.hankel2e)
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_scalar(self, function):
q = function(2. * u.m / (2. * u.m), 3. * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(1.0, 0.5)
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_array(self, function):
q = function(np.ones(3) * u.m / (1. * u.m),
np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == function(
np.ones(3),
np.array([1. / 3., 1. / 2., 1.]))
)
# should also work on quantities that can be made dimensionless
q2 = function(np.ones(3) * u.m / (1. * u.m),
np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value,
function(np.ones(3),
np.array([100. / 3., 100. / 2., 100.])))
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_invalid_units(self, function):
# Can't use jv() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
function(1. * u.kg, 3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
|
bcbe4ac6ab7e1662b0488b415185d8fcc08a4fa23398c5b7d7a1c10e0cb7ffb0 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions/values used repeatedly in different modules of
the ``builtin_frames`` package.
"""
import warnings
import numpy as np
from ... import units as u
from ... import _erfa as erfa
from ...time import Time
from ...utils import iers
from ...utils.exceptions import AstropyWarning
# The UTC time scale is not properly defined prior to 1960, so Time('B1950',
# scale='utc') will emit a warning. Instead, we use Time('B1950', scale='tai')
# which is equivalent, but does not emit a warning.
EQUINOX_J2000 = Time('J2000', scale='utc')
EQUINOX_B1950 = Time('B1950', scale='tai')
# This is a time object that is the default "obstime" when such an attribute is
# necessary. Currently, we use J2000.
DEFAULT_OBSTIME = Time('J2000', scale='utc')
PIOVER2 = np.pi / 2.
# comes from the mean of the 1962-2014 IERS B data
_DEFAULT_PM = (0.035, 0.29)*u.arcsec
def get_polar_motion(time):
"""
gets the two polar motion components in radians for use with apio13
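
    An illustrative sketch of the intended use (the date is arbitrary; the
    doctest is skipped since it needs IERS data)::

        >>> from astropy.time import Time
        >>> xp, yp = get_polar_motion(Time('2015-06-30'))  # doctest: +SKIP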
"""
# Get the polar motion from the IERS table
xp, yp, status = iers.IERS_Auto.open().pm_xy(time, return_status=True)
wmsg = None
if np.any(status == iers.TIME_BEFORE_IERS_RANGE):
wmsg = ('Tried to get polar motions for times before IERS data is '
'valid. Defaulting to polar motion from the 50-yr mean for those. '
'This may affect precision at the 10s of arcsec level')
xp.ravel()[status.ravel() == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[0]
yp.ravel()[status.ravel() == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[1]
warnings.warn(wmsg, AstropyWarning)
if np.any(status == iers.TIME_BEYOND_IERS_RANGE):
wmsg = ('Tried to get polar motions for times after IERS data is '
'valid. Defaulting to polar motion from the 50-yr mean for those. '
'This may affect precision at the 10s of arcsec level')
xp.ravel()[status.ravel() == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[0]
yp.ravel()[status.ravel() == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[1]
warnings.warn(wmsg, AstropyWarning)
return xp.to_value(u.radian), yp.to_value(u.radian)
def _warn_iers(ierserr):
"""
    Generate a warning for an IERSRangeError.
Parameters
----------
ierserr : An `~astropy.utils.iers.IERSRangeError`
"""
msg = '{0} Assuming UT1-UTC=0 for coordinate transformations.'
warnings.warn(msg.format(ierserr.args[0]), AstropyWarning)
def get_dut1utc(time):
"""
This function is used to get UT1-UTC in coordinates because normally it
gives an error outside the IERS range, but in coordinates we want to allow
it to go through but with a warning.
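
    A sketch of the intended use (skipped as a doctest, since it needs
    IERS data)::

        >>> from astropy.time import Time
        >>> dut1utc = get_dut1utc(Time('2010-01-01'))  # doctest: +SKIP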
"""
try:
return time.delta_ut1_utc
except iers.IERSRangeError as e:
_warn_iers(e)
return np.zeros(time.shape)
def get_jd12(time, scale):
"""
Gets ``jd1`` and ``jd2`` from a time object in a particular scale.
Parameters
----------
time : `~astropy.time.Time`
The time to get the jds for
scale : str
The time scale to get the jds for
Returns
-------
jd1 : float
jd2 : float
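
    Examples
    --------
    A minimal sketch; the date chosen is arbitrary::

        >>> from astropy.time import Time
        >>> jd1, jd2 = get_jd12(Time('2010-01-01'), 'tt')  # doctest: +SKIP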
"""
if time.scale == scale:
newtime = time
else:
try:
newtime = getattr(time, scale)
except iers.IERSRangeError as e:
_warn_iers(e)
newtime = time
return newtime.jd1, newtime.jd2
def norm(p):
"""
Normalise a p-vector.
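
    For example, normalising a simple vector (output formatting differs
    between numpy versions, so the doctest is skipped)::

        >>> norm(np.array([3., 0., 0.]))  # doctest: +SKIP
        array([1., 0., 0.])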
"""
if np.__version__ == '1.14.0':
# there is a bug in numpy v1.14.0 (fixed in 1.14.1) that causes
# this einsum call to break with the default of optimize=True
# see https://github.com/astropy/astropy/issues/7051
return p / np.sqrt(np.einsum('...i,...i', p, p, optimize=False))[..., np.newaxis]
else:
return p / np.sqrt(np.einsum('...i,...i', p, p))[..., np.newaxis]
def get_cip(jd1, jd2):
"""
Find the X, Y coordinates of the CIP and the CIO locator, s.
Parameters
----------
jd1 : float or `np.ndarray`
First part of two part Julian date (TDB)
jd2 : float or `np.ndarray`
Second part of two part Julian date (TDB)
Returns
    -------
x : float or `np.ndarray`
x coordinate of the CIP
y : float or `np.ndarray`
y coordinate of the CIP
s : float or `np.ndarray`
CIO locator, s
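
    Examples
    --------
    An illustrative call with an arbitrary two-part Julian date (skipped as
    a doctest, since the exact values depend on the ERFA version)::

        >>> x, y, s = get_cip(2456165.5, 0.401182685)  # doctest: +SKIP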
"""
# classical NPB matrix, IAU 2006/2000A
rpnb = erfa.pnm06a(jd1, jd2)
# CIP X, Y coordinates from array
x, y = erfa.bpn2xy(rpnb)
# CIO locator, s
s = erfa.s06(jd1, jd2, x, y)
return x, y, s
def aticq(ri, di, astrom):
"""
A slightly modified version of the ERFA function ``eraAticq``.
``eraAticq`` performs the transformations between two coordinate systems,
with the details of the transformation being encoded into the ``astrom`` array.
The companion function ``eraAtciqz`` is meant to be its inverse. However, this
is not true for directions close to the Solar centre, since the light deflection
calculations are numerically unstable and therefore not reversible.
This version sidesteps that problem by artificially reducing the light deflection
for directions which are within 90 arcseconds of the Sun's position. This is the
same approach used by the ERFA functions above, except that they use a threshold of
9 arcseconds.
Parameters
----------
ri : float or `~numpy.ndarray`
right ascension, radians
di : float or `~numpy.ndarray`
declination, radians
astrom : eraASTROM array
ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``
Returns
    -------
rc : float or `~numpy.ndarray`
dc : float or `~numpy.ndarray`
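
    Examples
    --------
    An illustrative round trip with ``atciqz`` (hypothetical input values;
    skipped as a doctest, since it needs an ERFA astrometry context)::

        >>> astrom, eo = erfa.apci13(2456165.5, 0.401182685)  # doctest: +SKIP
        >>> ri, di = atciqz(2.71, 0.174, astrom)  # doctest: +SKIP
        >>> rc, dc = aticq(ri, di, astrom)  # doctest: +SKIP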
"""
# RA, Dec to cartesian unit vectors
pos = erfa.s2c(ri, di)
# Bias-precession-nutation, giving GCRS proper direction.
ppr = erfa.trxp(astrom['bpn'], pos)
# Aberration, giving GCRS natural direction
d = np.zeros_like(ppr)
for j in range(2):
before = norm(ppr-d)
after = erfa.ab(before, astrom['v'], astrom['em'], astrom['bm1'])
d = after - before
pnat = norm(ppr-d)
# Light deflection by the Sun, giving BCRS coordinate direction
d = np.zeros_like(pnat)
for j in range(5):
before = norm(pnat-d)
after = erfa.ld(1.0, before, before, astrom['eh'], astrom['em'], 5e-8)
d = after - before
pco = norm(pnat-d)
# ICRS astrometric RA, Dec
rc, dc = erfa.c2s(pco)
return erfa.anp(rc), dc
def atciqz(rc, dc, astrom):
"""
A slightly modified version of the ERFA function ``eraAtciqz``.
``eraAtciqz`` performs the transformations between two coordinate systems,
with the details of the transformation being encoded into the ``astrom`` array.
The companion function ``eraAticq`` is meant to be its inverse. However, this
is not true for directions close to the Solar centre, since the light deflection
calculations are numerically unstable and therefore not reversible.
This version sidesteps that problem by artificially reducing the light deflection
for directions which are within 90 arcseconds of the Sun's position. This is the
same approach used by the ERFA functions above, except that they use a threshold of
9 arcseconds.
Parameters
----------
rc : float or `~numpy.ndarray`
right ascension, radians
dc : float or `~numpy.ndarray`
declination, radians
astrom : eraASTROM array
ERFA astrometry context, as produced by, e.g. ``eraApci13`` or ``eraApcs13``
Returns
    -------
ri : float or `~numpy.ndarray`
di : float or `~numpy.ndarray`
"""
# BCRS coordinate direction (unit vector).
pco = erfa.s2c(rc, dc)
# Light deflection by the Sun, giving BCRS natural direction.
pnat = erfa.ld(1.0, pco, pco, astrom['eh'], astrom['em'], 5e-8)
# Aberration, giving GCRS proper direction.
ppr = erfa.ab(pnat, astrom['v'], astrom['em'], astrom['bm1'])
# Bias-precession-nutation, giving CIRS proper direction.
# Has no effect if matrix is identity matrix, in which case gives GCRS ppr.
pi = erfa.rxp(astrom['bpn'], ppr)
# CIRS (GCRS) RA, Dec
ri, di = erfa.c2s(pi)
return erfa.anp(ri), di
def prepare_earth_position_vel(time):
"""
Get barycentric position and velocity, and heliocentric position of Earth
Parameters
    ----------
time : `~astropy.time.Time`
time at which to calculate position and velocity of Earth
Returns
    -------
earth_pv : `np.ndarray`
Barycentric position and velocity of Earth, in au and au/day
earth_helio : `np.ndarray`
Heliocentric position of Earth in au
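
    Examples
    --------
    A minimal sketch (skipped as a doctest; for a scalar time the expected
    shapes are ``(2, 3)`` and ``(3,)``)::

        >>> from astropy.time import Time
        >>> earth_pv, earth_helio = prepare_earth_position_vel(Time('J2010'))  # doctest: +SKIP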
"""
# this goes here to avoid circular import errors
from ..solar_system import (get_body_barycentric, get_body_barycentric_posvel)
# get barycentric position and velocity of earth
earth_pv = get_body_barycentric_posvel('earth', time)
# get heliocentric position of earth, preparing it for passing to erfa.
sun = get_body_barycentric('sun', time)
earth_heliocentric = (earth_pv[0] -
sun).get_xyz(xyz_axis=-1).to_value(u.au)
# Also prepare earth_pv for passing to erfa, which wants xyz in last
# dimension, and pos/vel in one-but-last.
# (Note could use np.stack once our minimum numpy version is >=1.10.)
earth_pv = np.concatenate((earth_pv[0].get_xyz(xyz_axis=-1).to(u.au)
[..., np.newaxis, :].value,
earth_pv[1].get_xyz(xyz_axis=-1).to(u.au/u.d)
[..., np.newaxis, :].value), axis=-2)
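    # With np.stack the above would read (an equivalent sketch, kept as a
    # comment because np.stack needs numpy >= 1.10):
    #   earth_pv = np.stack((earth_pv[0].get_xyz(xyz_axis=-1).to_value(u.au),
    #                        earth_pv[1].get_xyz(xyz_axis=-1).to_value(u.au / u.d)),
    #                       axis=-2)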
return earth_pv, earth_heliocentric
|
d88f706206cc8430031d13d0fc66f740369a64d87b1f9fb8b1ea164915210980 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
from ... import units as u
from ...tests.helper import (catch_warnings,
pytest, quantity_allclose as allclose,
assert_quantity_allclose as assert_allclose)
from ...utils import OrderedDescriptorContainer
from ...utils.compat import NUMPY_LT_1_14
from ...utils.exceptions import AstropyWarning
from .. import representation as r
from ..representation import REPRESENTATION_CLASSES
from .test_representation import unitphysics # this fixture is used below
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
def test_frame_attribute_descriptor():
""" Unit tests of the Attribute descriptor """
from ..attributes import Attribute
class TestAttributes(metaclass=OrderedDescriptorContainer):
attr_none = Attribute()
attr_2 = Attribute(default=2)
attr_3_attr2 = Attribute(default=3, secondary_attribute='attr_2')
attr_none_attr2 = Attribute(default=None, secondary_attribute='attr_2')
attr_none_nonexist = Attribute(default=None, secondary_attribute='nonexist')
t = TestAttributes()
# Defaults
assert t.attr_none is None
assert t.attr_2 == 2
assert t.attr_3_attr2 == 3
assert t.attr_none_attr2 == t.attr_2
assert t.attr_none_nonexist is None # No default and non-existent secondary attr
    # Setting values via '_'-prefixed internal vars (as would normally be done in __init__)
t._attr_none = 10
assert t.attr_none == 10
t._attr_2 = 20
assert t.attr_2 == 20
assert t.attr_3_attr2 == 3
assert t.attr_none_attr2 == t.attr_2
t._attr_none_attr2 = 40
assert t.attr_none_attr2 == 40
# Make sure setting values via public attribute fails
with pytest.raises(AttributeError) as err:
t.attr_none = 5
assert 'Cannot set frame attribute' in str(err)
def test_frame_subclass_attribute_descriptor():
from ..builtin_frames import FK4
from ..attributes import Attribute, TimeAttribute
from astropy.time import Time
_EQUINOX_B1980 = Time('B1980', scale='tai')
class MyFK4(FK4):
# equinox inherited from FK4, obstime overridden, and newattr is new
obstime = TimeAttribute(default=_EQUINOX_B1980)
newattr = Attribute(default='newattr')
mfk4 = MyFK4()
assert mfk4.equinox.value == 'B1950.000'
assert mfk4.obstime.value == 'B1980.000'
assert mfk4.newattr == 'newattr'
assert set(mfk4.get_frame_attr_names()) == set(['equinox', 'obstime', 'newattr'])
mfk4 = MyFK4(equinox='J1980.0', obstime='J1990.0', newattr='world')
assert mfk4.equinox.value == 'J1980.000'
assert mfk4.obstime.value == 'J1990.000'
assert mfk4.newattr == 'world'
def test_create_data_frames():
from ..builtin_frames import ICRS
# from repr
i1 = ICRS(r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc))
i2 = ICRS(r.UnitSphericalRepresentation(lon=1*u.deg, lat=2*u.deg))
# from preferred name
i3 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.kpc)
i4 = ICRS(ra=1*u.deg, dec=2*u.deg)
assert i1.data.lat == i3.data.lat
assert i1.data.lon == i3.data.lon
assert i1.data.distance == i3.data.distance
assert i2.data.lat == i4.data.lat
assert i2.data.lon == i4.data.lon
# now make sure the preferred names work as properties
assert_allclose(i1.ra, i3.ra)
assert_allclose(i2.ra, i4.ra)
assert_allclose(i1.distance, i3.distance)
with pytest.raises(AttributeError):
i1.ra = [11.]*u.deg
def test_create_ordered_data():
from ..builtin_frames import ICRS, Galactic, AltAz
TOL = 1e-10*u.deg
i = ICRS(1*u.deg, 2*u.deg)
assert (i.ra - 1*u.deg) < TOL
assert (i.dec - 2*u.deg) < TOL
g = Galactic(1*u.deg, 2*u.deg)
assert (g.l - 1*u.deg) < TOL
assert (g.b - 2*u.deg) < TOL
a = AltAz(1*u.deg, 2*u.deg)
assert (a.az - 1*u.deg) < TOL
assert (a.alt - 2*u.deg) < TOL
with pytest.raises(TypeError):
ICRS(1*u.deg, 2*u.deg, 1*u.deg, 2*u.deg)
with pytest.raises(TypeError):
sph = r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc)
ICRS(sph, 1*u.deg, 2*u.deg)
def test_create_nodata_frames():
from ..builtin_frames import ICRS, FK4, FK5
i = ICRS()
assert len(i.get_frame_attr_names()) == 0
f5 = FK5()
assert f5.equinox == FK5.get_frame_attr_names()['equinox']
f4 = FK4()
assert f4.equinox == FK4.get_frame_attr_names()['equinox']
# obstime is special because it's a property that uses equinox if obstime is not set
assert f4.obstime in (FK4.get_frame_attr_names()['obstime'],
FK4.get_frame_attr_names()['equinox'])
def test_no_data_nonscalar_frames():
from ..builtin_frames import AltAz
from astropy.time import Time
a1 = AltAz(obstime=Time('2012-01-01') + np.arange(10.) * u.day,
temperature=np.ones((3, 1)) * u.deg_C)
assert a1.obstime.shape == (3, 10)
assert a1.temperature.shape == (3, 10)
assert a1.shape == (3, 10)
with pytest.raises(ValueError) as exc:
AltAz(obstime=Time('2012-01-01') + np.arange(10.) * u.day,
temperature=np.ones((3,)) * u.deg_C)
assert 'inconsistent shapes' in str(exc)
def test_frame_repr():
from ..builtin_frames import ICRS, FK5
i = ICRS()
assert repr(i) == '<ICRS Frame>'
f5 = FK5()
assert repr(f5).startswith('<FK5 Frame (equinox=')
i2 = ICRS(ra=1*u.deg, dec=2*u.deg)
i3 = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.kpc)
assert repr(i2) == ('<ICRS Coordinate: (ra, dec) in deg\n'
' ({})>').format(' 1., 2.' if NUMPY_LT_1_14
else '1., 2.')
assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n'
' ({})>').format(' 1., 2., 3.' if NUMPY_LT_1_14
else '1., 2., 3.')
# try with arrays
i2 = ICRS(ra=[1.1, 2.1]*u.deg, dec=[2.1, 3.1]*u.deg)
i3 = ICRS(ra=[1.1, 2.1]*u.deg, dec=[-15.6, 17.1]*u.deg, distance=[11., 21.]*u.kpc)
assert repr(i2) == ('<ICRS Coordinate: (ra, dec) in deg\n'
' [{}]>').format('( 1.1, 2.1), ( 2.1, 3.1)'
if NUMPY_LT_1_14 else
'(1.1, 2.1), (2.1, 3.1)')
if NUMPY_LT_1_14:
assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n'
' [( 1.1, -15.6, 11.), ( 2.1, 17.1, 21.)]>')
else:
assert repr(i3) == ('<ICRS Coordinate: (ra, dec, distance) in (deg, deg, kpc)\n'
' [(1.1, -15.6, 11.), (2.1, 17.1, 21.)]>')
def test_frame_repr_vels():
from ..builtin_frames import ICRS
i = ICRS(ra=1*u.deg, dec=2*u.deg,
pm_ra_cosdec=1*u.marcsec/u.yr, pm_dec=2*u.marcsec/u.yr)
# unit comes out as mas/yr because of the preferred units defined in the
# frame RepresentationMapping
assert repr(i) == ('<ICRS Coordinate: (ra, dec) in deg\n'
' ({0})\n'
' (pm_ra_cosdec, pm_dec) in mas / yr\n'
' ({0})>').format(' 1., 2.' if NUMPY_LT_1_14 else
'1., 2.')
def test_converting_units():
import re
from ..baseframe import RepresentationMapping
from ..builtin_frames import ICRS, FK5
    # this is a regular expression that, with split (see below), removes
    # what's after the decimal point, to fix rounding problems
rexrepr = re.compile(r'(.*?=\d\.).*?( .*?=\d\.).*?( .*)')
# Use values that aren't subject to rounding down to X.9999...
i2 = ICRS(ra=2.*u.deg, dec=2.*u.deg)
i2_many = ICRS(ra=[2., 4.]*u.deg, dec=[2., -8.1]*u.deg)
# converting from FK5 to ICRS and back changes the *internal* representation,
# but it should still come out in the preferred form
i4 = i2.transform_to(FK5).transform_to(ICRS)
i4_many = i2_many.transform_to(FK5).transform_to(ICRS)
ri2 = ''.join(rexrepr.split(repr(i2)))
ri4 = ''.join(rexrepr.split(repr(i4)))
assert ri2 == ri4
assert i2.data.lon.unit != i4.data.lon.unit # Internal repr changed
ri2_many = ''.join(rexrepr.split(repr(i2_many)))
ri4_many = ''.join(rexrepr.split(repr(i4_many)))
assert ri2_many == ri4_many
assert i2_many.data.lon.unit != i4_many.data.lon.unit # Internal repr changed
# but that *shouldn't* hold if we turn off units for the representation
class FakeICRS(ICRS):
frame_specific_representation_info = {
'spherical': [RepresentationMapping('lon', 'ra', u.hourangle),
RepresentationMapping('lat', 'dec', None),
RepresentationMapping('distance', 'distance')] # should fall back to default of None unit
}
fi = FakeICRS(i4.data)
ri2 = ''.join(rexrepr.split(repr(i2)))
rfi = ''.join(rexrepr.split(repr(fi)))
rfi = re.sub('FakeICRS', 'ICRS', rfi) # Force frame name to match
assert ri2 != rfi
# the attributes should also get the right units
assert i2.dec.unit == i4.dec.unit
# unless no/explicitly given units
assert i2.dec.unit != fi.dec.unit
assert i2.ra.unit != fi.ra.unit
assert fi.ra.unit == u.hourangle
def test_representation_info():
from ..baseframe import RepresentationMapping
from ..builtin_frames import ICRS
class NewICRS1(ICRS):
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'rara', u.hourangle),
RepresentationMapping('lat', 'decdec', u.degree),
RepresentationMapping('distance', 'distance', u.kpc)]
}
i1 = NewICRS1(rara=10*u.degree, decdec=-12*u.deg, distance=1000*u.pc,
pm_rara_cosdecdec=100*u.mas/u.yr,
pm_decdec=17*u.mas/u.yr,
radial_velocity=10*u.km/u.s)
assert allclose(i1.rara, 10*u.deg)
assert i1.rara.unit == u.hourangle
assert allclose(i1.decdec, -12*u.deg)
assert allclose(i1.distance, 1000*u.pc)
assert i1.distance.unit == u.kpc
assert allclose(i1.pm_rara_cosdecdec, 100*u.mas/u.yr)
assert allclose(i1.pm_decdec, 17*u.mas/u.yr)
# this should auto-set the names of UnitSpherical:
i1.set_representation_cls(r.UnitSphericalRepresentation,
s=r.UnitSphericalCosLatDifferential)
assert allclose(i1.rara, 10*u.deg)
assert allclose(i1.decdec, -12*u.deg)
assert allclose(i1.pm_rara_cosdecdec, 100*u.mas/u.yr)
assert allclose(i1.pm_decdec, 17*u.mas/u.yr)
# For backwards compatibility, we also support the string name in the
# representation info dictionary:
class NewICRS2(ICRS):
frame_specific_representation_info = {
'spherical': [
RepresentationMapping('lon', 'ang1', u.hourangle),
RepresentationMapping('lat', 'ang2', u.degree),
RepresentationMapping('distance', 'howfar', u.kpc)]
}
i2 = NewICRS2(ang1=10*u.degree, ang2=-12*u.deg, howfar=1000*u.pc)
assert allclose(i2.ang1, 10*u.deg)
assert i2.ang1.unit == u.hourangle
assert allclose(i2.ang2, -12*u.deg)
assert allclose(i2.howfar, 1000*u.pc)
assert i2.howfar.unit == u.kpc
# Test that the differential kwargs get overridden
class NewICRS3(ICRS):
frame_specific_representation_info = {
r.SphericalCosLatDifferential: [
RepresentationMapping('d_lon_coslat', 'pm_ang1', u.hourangle/u.year),
RepresentationMapping('d_lat', 'pm_ang2'),
RepresentationMapping('d_distance', 'vlos', u.kpc/u.Myr)]
}
i3 = NewICRS3(lon=10*u.degree, lat=-12*u.deg, distance=1000*u.pc,
pm_ang1=1*u.mas/u.yr, pm_ang2=2*u.mas/u.yr,
vlos=100*u.km/u.s)
assert allclose(i3.pm_ang1, 1*u.mas/u.yr)
assert i3.pm_ang1.unit == u.hourangle/u.year
assert allclose(i3.pm_ang2, 2*u.mas/u.yr)
assert allclose(i3.vlos, 100*u.km/u.s)
assert i3.vlos.unit == u.kpc/u.Myr
def test_realizing():
from ..builtin_frames import ICRS, FK5
from ...time import Time
rep = r.SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc)
i = ICRS()
i2 = i.realize_frame(rep)
assert not i.has_data
assert i2.has_data
f = FK5(equinox=Time('J2001', scale='utc'))
f2 = f.realize_frame(rep)
assert not f.has_data
assert f2.has_data
assert f2.equinox == f.equinox
assert f2.equinox != FK5.get_frame_attr_names()['equinox']
# Check that a nicer error message is returned:
with pytest.raises(TypeError) as excinfo:
f.realize_frame(f.representation)
assert ('Class passed as data instead of a representation' in
excinfo.value.args[0])
def test_replicating():
from ..builtin_frames import ICRS, AltAz
from ...time import Time
i = ICRS(ra=[1]*u.deg, dec=[2]*u.deg)
icopy = i.replicate(copy=True)
irepl = i.replicate(copy=False)
i.data._lat[:] = 0*u.deg
assert np.all(i.data.lat == irepl.data.lat)
assert np.all(i.data.lat != icopy.data.lat)
iclone = i.replicate_without_data()
assert i.has_data
assert not iclone.has_data
aa = AltAz(alt=1*u.deg, az=2*u.deg, obstime=Time('J2000'))
aaclone = aa.replicate_without_data(obstime=Time('J2001'))
assert not aaclone.has_data
assert aa.obstime != aaclone.obstime
assert aa.pressure == aaclone.pressure
assert aa.obswl == aaclone.obswl
def test_getitem():
from ..builtin_frames import ICRS
rep = r.SphericalRepresentation(
[1, 2, 3]*u.deg, [4, 5, 6]*u.deg, [7, 8, 9]*u.kpc)
i = ICRS(rep)
assert len(i.ra) == 3
iidx = i[1:]
assert len(iidx.ra) == 2
iidx2 = i[0]
assert iidx2.ra.isscalar
def test_transform():
"""
This test just makes sure the transform architecture works, but does *not*
actually test all the builtin transforms themselves are accurate
"""
from ..builtin_frames import ICRS, FK4, FK5, Galactic
from ...time import Time
i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg)
f = i.transform_to(FK5)
i2 = f.transform_to(ICRS)
assert i2.data.__class__ == r.UnitSphericalRepresentation
assert_allclose(i.ra, i2.ra)
assert_allclose(i.dec, i2.dec)
i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc)
f = i.transform_to(FK5)
i2 = f.transform_to(ICRS)
assert i2.data.__class__ != r.UnitSphericalRepresentation
f = FK5(ra=1*u.deg, dec=2*u.deg, equinox=Time('J2001', scale='utc'))
f4 = f.transform_to(FK4)
f4_2 = f.transform_to(FK4(equinox=f.equinox))
# make sure attributes are copied over correctly
assert f4.equinox == FK4.get_frame_attr_names()['equinox']
assert f4_2.equinox == f.equinox
# make sure self-transforms also work
i = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg)
i2 = i.transform_to(ICRS)
assert_allclose(i.ra, i2.ra)
assert_allclose(i.dec, i2.dec)
f = FK5(ra=1*u.deg, dec=2*u.deg, equinox=Time('J2001', scale='utc'))
f2 = f.transform_to(FK5) # default equinox, so should be *different*
assert f2.equinox == FK5().equinox
with pytest.raises(AssertionError):
assert_allclose(f.ra, f2.ra)
with pytest.raises(AssertionError):
assert_allclose(f.dec, f2.dec)
# finally, check Galactic round-tripping
i1 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg)
i2 = i1.transform_to(Galactic).transform_to(ICRS)
assert_allclose(i1.ra, i2.ra)
assert_allclose(i1.dec, i2.dec)
def test_transform_to_nonscalar_nodata_frame():
# https://github.com/astropy/astropy/pull/5254#issuecomment-241592353
from ..builtin_frames import ICRS, FK5
from ...time import Time
times = Time('2016-08-23') + np.linspace(0, 10, 12)*u.day
coo1 = ICRS(ra=[[0.], [10.], [20.]]*u.deg,
dec=[[-30.], [30.], [60.]]*u.deg)
coo2 = coo1.transform_to(FK5(equinox=times))
assert coo2.shape == (3, 12)
def test_sep():
from ..builtin_frames import ICRS
i1 = ICRS(ra=0*u.deg, dec=1*u.deg)
i2 = ICRS(ra=0*u.deg, dec=2*u.deg)
sep = i1.separation(i2)
assert sep.deg == 1
i3 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc)
i4 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[4, 5]*u.kpc)
sep3d = i3.separation_3d(i4)
assert_allclose(sep3d.to(u.kpc), np.array([1, 1])*u.kpc)
# check that it works even with velocities
i5 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[5, 6]*u.kpc,
pm_ra_cosdec=[1, 2]*u.mas/u.yr, pm_dec=[3, 4]*u.mas/u.yr,
radial_velocity=[5, 6]*u.km/u.s)
i6 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[7, 8]*u.kpc,
pm_ra_cosdec=[1, 2]*u.mas/u.yr, pm_dec=[3, 4]*u.mas/u.yr,
radial_velocity=[5, 6]*u.km/u.s)
sep3d = i5.separation_3d(i6)
assert_allclose(sep3d.to(u.kpc), np.array([2, 2])*u.kpc)
def test_time_inputs():
"""
Test validation and conversion of inputs for equinox and obstime attributes.
"""
from ...time import Time
from ..builtin_frames import FK4
c = FK4(1 * u.deg, 2 * u.deg, equinox='J2001.5', obstime='2000-01-01 12:00:00')
assert c.equinox == Time('J2001.5')
assert c.obstime == Time('2000-01-01 12:00:00')
with pytest.raises(ValueError) as err:
c = FK4(1 * u.deg, 2 * u.deg, equinox=1.5)
assert 'Invalid time input' in str(err)
with pytest.raises(ValueError) as err:
c = FK4(1 * u.deg, 2 * u.deg, obstime='hello')
assert 'Invalid time input' in str(err)
# A vector time should work if the shapes match, but we don't automatically
# broadcast the basic data (just like time).
FK4([1, 2] * u.deg, [2, 3] * u.deg, obstime=['J2000', 'J2001'])
with pytest.raises(ValueError) as err:
FK4(1 * u.deg, 2 * u.deg, obstime=['J2000', 'J2001'])
assert 'shape' in str(err)
def test_is_frame_attr_default():
"""
Check that the `is_frame_attr_default` machinery works as expected
"""
from ...time import Time
from ..builtin_frames import FK5
c1 = FK5(ra=1*u.deg, dec=1*u.deg)
c2 = FK5(ra=1*u.deg, dec=1*u.deg, equinox=FK5.get_frame_attr_names()['equinox'])
c3 = FK5(ra=1*u.deg, dec=1*u.deg, equinox=Time('J2001.5'))
assert c1.equinox == c2.equinox
assert c1.equinox != c3.equinox
assert c1.is_frame_attr_default('equinox')
assert not c2.is_frame_attr_default('equinox')
assert not c3.is_frame_attr_default('equinox')
c4 = c1.realize_frame(r.UnitSphericalRepresentation(3*u.deg, 4*u.deg))
c5 = c2.realize_frame(r.UnitSphericalRepresentation(3*u.deg, 4*u.deg))
assert c4.is_frame_attr_default('equinox')
assert not c5.is_frame_attr_default('equinox')
def test_altaz_attributes():
from ...time import Time
from .. import EarthLocation, AltAz
aa = AltAz(1*u.deg, 2*u.deg)
assert aa.obstime is None
assert aa.location is None
aa2 = AltAz(1*u.deg, 2*u.deg, obstime='J2000')
assert aa2.obstime == Time('J2000')
aa3 = AltAz(1*u.deg, 2*u.deg, location=EarthLocation(0*u.deg, 0*u.deg, 0*u.m))
assert isinstance(aa3.location, EarthLocation)
def test_representation():
"""
Test the getter and setter properties for `representation`
"""
from ..builtin_frames import ICRS
# Create the frame object.
icrs = ICRS(ra=1*u.deg, dec=1*u.deg)
data = icrs.data
# Create some representation objects.
icrs_cart = icrs.cartesian
icrs_spher = icrs.spherical
# Testing when `_representation` set to `CartesianRepresentation`.
icrs.representation = r.CartesianRepresentation
assert icrs.representation == r.CartesianRepresentation
assert icrs_cart.x == icrs.x
assert icrs_cart.y == icrs.y
assert icrs_cart.z == icrs.z
assert icrs.data == data
# Testing that an ICRS object in CartesianRepresentation must not have spherical attributes.
for attr in ('ra', 'dec', 'distance'):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert 'object has no attribute' in str(err)
# Testing when `_representation` set to `CylindricalRepresentation`.
icrs.representation = r.CylindricalRepresentation
assert icrs.representation == r.CylindricalRepresentation
assert icrs.data == data
# Testing setter input using text argument for spherical.
icrs.representation = 'spherical'
assert icrs.representation is r.SphericalRepresentation
assert icrs_spher.lat == icrs.dec
assert icrs_spher.lon == icrs.ra
assert icrs_spher.distance == icrs.distance
assert icrs.data == data
# Testing that an ICRS object in SphericalRepresentation must not have cartesian attributes.
for attr in ('x', 'y', 'z'):
with pytest.raises(AttributeError) as err:
getattr(icrs, attr)
assert 'object has no attribute' in str(err)
# Testing setter input using text argument for cylindrical.
icrs.representation = 'cylindrical'
assert icrs.representation is r.CylindricalRepresentation
assert icrs.data == data
with pytest.raises(ValueError) as err:
icrs.representation = 'WRONG'
assert 'but must be a BaseRepresentation class' in str(err)
with pytest.raises(ValueError) as err:
icrs.representation = ICRS
assert 'but must be a BaseRepresentation class' in str(err)
def test_represent_as():
from ..builtin_frames import ICRS
icrs = ICRS(ra=1*u.deg, dec=1*u.deg)
cart1 = icrs.represent_as('cartesian')
cart2 = icrs.represent_as(r.CartesianRepresentation)
    assert cart1.x == cart2.x
    assert cart1.y == cart2.y
    assert cart1.z == cart2.z
# now try with velocities
icrs = ICRS(ra=0*u.deg, dec=0*u.deg, distance=10*u.kpc,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
radial_velocity=1*u.km/u.s)
# single string
rep2 = icrs.represent_as('cylindrical')
assert isinstance(rep2, r.CylindricalRepresentation)
assert isinstance(rep2.differentials['s'], r.CylindricalDifferential)
# single class with positional in_frame_units, verify that warning raised
with catch_warnings() as w:
icrs.represent_as(r.CylindricalRepresentation, False)
assert len(w) == 1
assert w[0].category == AstropyWarning
assert 'argument position' in str(w[0].message)
# TODO: this should probably fail in the future once we figure out a better
# workaround for dealing with UnitSphericalRepresentation's with
# RadialDifferential's
# two classes
# rep2 = icrs.represent_as(r.CartesianRepresentation,
# r.SphericalCosLatDifferential)
# assert isinstance(rep2, r.CartesianRepresentation)
# assert isinstance(rep2.differentials['s'], r.SphericalCosLatDifferential)
with pytest.raises(ValueError):
icrs.represent_as('odaigahara')
def test_shorthand_representations():
from ..builtin_frames import ICRS
rep = r.CartesianRepresentation([1, 2, 3]*u.pc)
dif = r.CartesianDifferential([1, 2, 3]*u.km/u.s)
rep = rep.with_differentials(dif)
icrs = ICRS(rep)
sph = icrs.spherical
assert isinstance(sph, r.SphericalRepresentation)
assert isinstance(sph.differentials['s'], r.SphericalDifferential)
sph = icrs.sphericalcoslat
assert isinstance(sph, r.SphericalRepresentation)
assert isinstance(sph.differentials['s'], r.SphericalCosLatDifferential)
def test_dynamic_attrs():
from ..builtin_frames import ICRS
c = ICRS(1*u.deg, 2*u.deg)
assert 'ra' in dir(c)
assert 'dec' in dir(c)
with pytest.raises(AttributeError) as err:
c.blahblah
assert "object has no attribute 'blahblah'" in str(err)
with pytest.raises(AttributeError) as err:
c.ra = 1
assert "Cannot set any frame attribute" in str(err)
c.blahblah = 1
assert c.blahblah == 1
def test_nodata_error():
from ..builtin_frames import ICRS
i = ICRS()
with pytest.raises(ValueError) as excinfo:
i.data
assert 'does not have associated data' in str(excinfo.value)
def test_len0_data():
from ..builtin_frames import ICRS
i = ICRS([]*u.deg, []*u.deg)
assert i.has_data
repr(i)
def test_quantity_attributes():
from ..builtin_frames import GCRS
# make sure we can create a GCRS frame with valid inputs
GCRS(obstime='J2002', obsgeoloc=[1, 2, 3]*u.km, obsgeovel=[4, 5, 6]*u.km/u.s)
    # make sure it fails for invalid locs or vels
with pytest.raises(TypeError):
GCRS(obsgeoloc=[1, 2, 3]) # no unit
with pytest.raises(u.UnitsError):
GCRS(obsgeoloc=[1, 2, 3]*u.km/u.s) # incorrect unit
with pytest.raises(ValueError):
GCRS(obsgeoloc=[1, 3]*u.km) # incorrect shape
def test_eloc_attributes():
from .. import AltAz, ITRS, GCRS, EarthLocation
el = EarthLocation(lon=12.3*u.deg, lat=45.6*u.deg, height=1*u.km)
it = ITRS(r.SphericalRepresentation(lon=12.3*u.deg, lat=45.6*u.deg, distance=1*u.km))
gc = GCRS(ra=12.3*u.deg, dec=45.6*u.deg, distance=6375*u.km)
el1 = AltAz(location=el).location
assert isinstance(el1, EarthLocation)
# these should match *exactly* because the EarthLocation
assert el1.lat == el.lat
assert el1.lon == el.lon
assert el1.height == el.height
el2 = AltAz(location=it).location
assert isinstance(el2, EarthLocation)
# these should *not* match because giving something in Spherical ITRS is
# *not* the same as giving it as an EarthLocation: EarthLocation is on an
# elliptical geoid. So the longitude should match (because flattening is
# only along the z-axis), but latitude should not. Also, height is relative
# to the *surface* in EarthLocation, but the ITRS distance is relative to
# the center of the Earth
assert not allclose(el2.lat, it.spherical.lat)
assert allclose(el2.lon, it.spherical.lon)
assert el2.height < -6000*u.km
el3 = AltAz(location=gc).location
# GCRS inputs implicitly get transformed to ITRS and then onto
# EarthLocation's elliptical geoid. So both lat and lon shouldn't match
assert isinstance(el3, EarthLocation)
assert not allclose(el3.lat, gc.dec)
assert not allclose(el3.lon, gc.ra)
assert np.abs(el3.height) < 500*u.km
def test_equivalent_frames():
from .. import SkyCoord
from ..builtin_frames import ICRS, FK4, FK5, AltAz
i = ICRS()
i2 = ICRS(1*u.deg, 2*u.deg)
assert i.is_equivalent_frame(i)
assert i.is_equivalent_frame(i2)
with pytest.raises(TypeError):
assert i.is_equivalent_frame(10)
with pytest.raises(TypeError):
assert i2.is_equivalent_frame(SkyCoord(i2))
f1 = FK5()
f2 = FK5(1*u.deg, 2*u.deg, equinox='J2000')
f3 = FK5(equinox='J2010')
f4 = FK4(equinox='J2010')
assert f1.is_equivalent_frame(f1)
assert not i.is_equivalent_frame(f1)
assert f1.is_equivalent_frame(f2)
assert not f1.is_equivalent_frame(f3)
assert not f3.is_equivalent_frame(f4)
aa1 = AltAz()
aa2 = AltAz(obstime='J2010')
assert aa2.is_equivalent_frame(aa2)
assert not aa1.is_equivalent_frame(i)
assert not aa1.is_equivalent_frame(aa2)
def test_representation_subclass():
# Regression test for #3354
from ..builtin_frames import FK5
# Normally when instantiating a frame without a distance the frame will try
# and use UnitSphericalRepresentation internally instead of
# SphericalRepresentation.
frame = FK5(representation=r.SphericalRepresentation, ra=32 * u.deg, dec=20 * u.deg)
assert type(frame._data) == r.UnitSphericalRepresentation
assert frame.representation == r.SphericalRepresentation
# If using a SphericalRepresentation class this used to not work, so we
# test here that this is now fixed.
class NewSphericalRepresentation(r.SphericalRepresentation):
attr_classes = r.SphericalRepresentation.attr_classes
frame = FK5(representation=NewSphericalRepresentation, lon=32 * u.deg, lat=20 * u.deg)
assert type(frame._data) == r.UnitSphericalRepresentation
assert frame.representation == NewSphericalRepresentation
# A similar issue then happened in __repr__ with subclasses of
# SphericalRepresentation.
assert repr(frame) == ("<FK5 Coordinate (equinox=J2000.000): (lon, lat) in deg\n"
" ({})>").format(' 32., 20.' if NUMPY_LT_1_14
else '32., 20.')
# A more subtle issue is when specifying a custom
# UnitSphericalRepresentation subclass for the data and
# SphericalRepresentation or a subclass for the representation.
class NewUnitSphericalRepresentation(r.UnitSphericalRepresentation):
attr_classes = r.UnitSphericalRepresentation.attr_classes
def __repr__(self):
return "<NewUnitSphericalRepresentation: spam spam spam>"
frame = FK5(NewUnitSphericalRepresentation(lon=32 * u.deg, lat=20 * u.deg),
representation=NewSphericalRepresentation)
assert repr(frame) == "<FK5 Coordinate (equinox=J2000.000): spam spam spam>"
def test_getitem_representation():
"""
Make sure current representation survives __getitem__ even if different
from data representation.
"""
from ..builtin_frames import ICRS
c = ICRS([1, 1] * u.deg, [2, 2] * u.deg)
c.representation = 'cartesian'
assert c[0].representation is r.CartesianRepresentation
def test_component_error_useful():
"""
Check that a data-less frame gives useful error messages about not having
data when the attributes asked for are possible coordinate components
"""
from ..builtin_frames import ICRS
i = ICRS()
with pytest.raises(ValueError) as excinfo:
i.ra
assert 'does not have associated data' in str(excinfo.value)
with pytest.raises(AttributeError) as excinfo1:
i.foobar
with pytest.raises(AttributeError) as excinfo2:
i.lon # lon is *not* the component name despite being the underlying representation's name
assert "object has no attribute 'foobar'" in str(excinfo1.value)
assert "object has no attribute 'lon'" in str(excinfo2.value)
def test_cache_clear():
from ..builtin_frames import ICRS
i = ICRS(1*u.deg, 2*u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
assert len(i.cache['representation']) == 2
i.cache.clear()
assert len(i.cache['representation']) == 0
def test_inplace_array():
from ..builtin_frames import ICRS
i = ICRS([[1, 2], [3, 4]]*u.deg, [[10, 20], [30, 40]]*u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
# Check that repr() has added a rep to the cache
assert len(i.cache['representation']) == 2
# Modify the data
i.data.lon[:, 0] = [100, 200]*u.deg
# Clear the cache
i.cache.clear()
    # This will use a second (potentially cached) rep
assert_allclose(i.ra, [[100, 2], [200, 4]]*u.deg)
assert_allclose(i.dec, [[10, 20], [30, 40]]*u.deg)
def test_inplace_change():
from ..builtin_frames import ICRS
i = ICRS(1*u.deg, 2*u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
# Check that repr() has added a rep to the cache
assert len(i.cache['representation']) == 2
# Modify the data
i.data.lon[()] = 10*u.deg
# Clear the cache
i.cache.clear()
    # This will use a second (potentially cached) rep
assert i.ra == 10 * u.deg
assert i.dec == 2 * u.deg
def test_representation_with_multiple_differentials():
from ..builtin_frames import ICRS
dif1 = r.CartesianDifferential([1, 2, 3]*u.km/u.s)
dif2 = r.CartesianDifferential([1, 2, 3]*u.km/u.s**2)
rep = r.CartesianRepresentation([1, 2, 3]*u.pc,
differentials={'s': dif1, 's2': dif2})
    # frames do not support representations with multiple differentials,
    # so this should raise
with pytest.raises(ValueError):
ICRS(rep)
def test_representation_arg_backwards_compatibility():
# TODO: this test can be removed when the `representation` argument is
# removed from the BaseCoordinateFrame initializer.
from ..builtin_frames import ICRS
c1 = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc,
representation_type=r.CartesianRepresentation)
c2 = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc,
representation=r.CartesianRepresentation)
c3 = ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc,
representation='cartesian')
assert c1.x == c2.x
assert c1.y == c2.y
assert c1.z == c2.z
assert c1.x == c3.x
assert c1.y == c3.y
assert c1.z == c3.z
assert c1.representation == c1.representation_type
with pytest.raises(ValueError):
ICRS(x=1*u.pc, y=2*u.pc, z=3*u.pc,
representation='cartesian',
representation_type='cartesian')
def test_missing_component_error_names():
"""
This test checks that the component names are frame component names, not
representation or differential names, when referenced in an exception raised
when not passing in enough data. For example:
ICRS(ra=10*u.deg)
should state:
TypeError: __init__() missing 1 required positional argument: 'dec'
"""
from ..builtin_frames import ICRS
with pytest.raises(TypeError) as e:
ICRS(ra=150 * u.deg)
assert "missing 1 required positional argument: 'dec'" in str(e)
with pytest.raises(TypeError) as e:
ICRS(ra=150*u.deg, dec=-11*u.deg,
pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr)
assert "pm_ra_cosdec" in str(e)
def test_non_spherical_representation_unit_creation(unitphysics):
from ..builtin_frames import ICRS
class PhysicsICRS(ICRS):
default_representation = r.PhysicsSphericalRepresentation
pic = PhysicsICRS(phi=1*u.deg, theta=25*u.deg, r=1*u.kpc)
assert isinstance(pic.data, r.PhysicsSphericalRepresentation)
picu = PhysicsICRS(phi=1*u.deg, theta=25*u.deg)
assert isinstance(picu.data, unitphysics)
|
f50aa95a451c90b43fc3a8a68e632d886b6927222452c6edc991b8ec4dd38967 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initalization and other aspects of Angle and subclasses"""
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from ..angles import Longitude, Latitude, Angle
from ... import units as u
from ..errors import IllegalSecondError, IllegalMinuteError, IllegalHourError
def test_create_angles():
"""
Tests creating and accessing Angle objects
"""
''' The "angle" is a fundamental object. The internal
representation is stored in radians, but this is transparent to the user.
Units *must* be specified rather than a default value be assumed. This is
as much for self-documenting code as anything else.
Angle objects simply represent a single angular coordinate. More specific
angular coordinates (e.g. Longitude, Latitude) are subclasses of Angle.'''
a1 = Angle(54.12412, unit=u.degree)
a2 = Angle("54.12412", unit=u.degree)
a3 = Angle("54:07:26.832", unit=u.degree)
a4 = Angle("54.12412 deg")
a5 = Angle("54.12412 degrees")
a6 = Angle("54.12412°") # because we like Unicode
a7 = Angle((54, 7, 26.832), unit=u.degree)
a8 = Angle("54°07'26.832\"")
    # (deg,min,sec) *tuples* are acceptable, but lists/arrays are *not*
    # interpreted as (d, m, s); they become arrays of angles instead,
    # because of the need to eventually support arrays of coordinates
a9 = Angle([54, 7, 26.832], unit=u.degree)
assert_allclose(a9.value, [54, 7, 26.832])
assert a9.unit is u.degree
a10 = Angle(3.60827466667, unit=u.hour)
a11 = Angle("3:36:29.7888000120", unit=u.hour)
a12 = Angle((3, 36, 29.7888000120), unit=u.hour) # *must* be a tuple
# Regression test for #5001
a13 = Angle((3, 36, 29.7888000120), unit='hour')
Angle(0.944644098745, unit=u.radian)
with pytest.raises(u.UnitsError):
Angle(54.12412)
# raises an exception because this is ambiguous
with pytest.raises(u.UnitsError):
Angle(54.12412, unit=u.m)
with pytest.raises(ValueError):
Angle(12.34, unit="not a unit")
a14 = Angle("03h36m29.7888000120") # no trailing 's', but unambiguous
a15 = Angle("5h4m3s") # single digits, no decimal
assert a15.unit == u.hourangle
a16 = Angle("1 d")
a17 = Angle("1 degree")
assert a16.degree == 1
assert a17.degree == 1
a18 = Angle("54 07.4472", unit=u.degree)
a19 = Angle("54:07.4472", unit=u.degree)
a20 = Angle("54d07.4472m", unit=u.degree)
a21 = Angle("3h36m", unit=u.hour)
a22 = Angle("3.6h", unit=u.hour)
a23 = Angle("- 3h", unit=u.hour)
a24 = Angle("+ 3h", unit=u.hour)
# ensure the above angles that should match do
assert a1 == a2 == a3 == a4 == a5 == a6 == a7 == a8 == a18 == a19 == a20
assert_allclose(a1.radian, a2.radian)
assert_allclose(a2.degree, a3.degree)
assert_allclose(a3.radian, a4.radian)
assert_allclose(a4.radian, a5.radian)
assert_allclose(a5.radian, a6.radian)
assert_allclose(a6.radian, a7.radian)
assert_allclose(a10.degree, a11.degree)
assert a11 == a12 == a13 == a14
assert a21 == a22
assert a23 == -a24
# check for illegal ranges / values
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.degree)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.degree)
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.hour)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.hour)
with pytest.raises(IllegalHourError):
a = Angle("99 25 51.0", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12 25 51.0xxx", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12h34321m32.2s")
assert a1 is not None
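# Illustrative sketch (not part of the original suite): the equivalences
# above boil down to Angle parsing many spellings of one sexagesimal value.
# The value 10.5 deg is an assumption chosen for the example.
def test_create_angles_equivalent_spellings_sketch():
    a_ref = Angle("10.5d")
    for spelling in ["10d30m00s", "10:30:00", (10, 30, 0), "10.5°"]:
        assert_allclose(Angle(spelling, unit=u.degree).degree, a_ref.degree)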
def test_angle_from_view():
q = np.arange(3.) * u.deg
a = q.view(Angle)
assert type(a) is Angle
assert a.unit is q.unit
assert np.all(a == q)
q2 = np.arange(4) * u.m
with pytest.raises(u.UnitTypeError):
q2.view(Angle)
def test_angle_ops():
"""
Tests operations on Angle objects
"""
# Angles can be added and subtracted. Multiplication and division by a
# scalar is also permitted. Negation is also valid. All of these operate
# element-wise. Attempting to multiply or divide two
# Angle objects will return a Quantity. An exception will be raised if one
# attempts to store output with a non-angular unit in an Angle [#2718].
a1 = Angle(3.60827466667, unit=u.hour)
a2 = Angle("54:07:26.832", unit=u.degree)
a1 + a2 # creates new Angle object
a1 - a2
-a1
assert_allclose((a1 * 2).hour, 2 * 3.6082746666700003)
assert abs((a1 / 3.123456).hour - 3.60827466667 / 3.123456) < 1e-10
# commutativity
assert (2 * a1).hour == (a1 * 2).hour
a3 = Angle(a1) # makes a *copy* of the object, but identical content as a1
assert_allclose(a1.radian, a3.radian)
assert a1 is not a3
a4 = abs(-a1)
assert a4.radian == a1.radian
a5 = Angle(5.0, unit=u.hour)
assert a5 > a1
assert a5 >= a1
assert a1 < a5
assert a1 <= a5
# check operations with non-angular result give Quantity.
a6 = Angle(45., u.degree)
a7 = a6 * a5
assert type(a7) is u.Quantity
# but those with angular result yield Angle.
# (a9 is regression test for #5327)
a8 = a1 + 1.*u.deg
assert type(a8) is Angle
a9 = 1.*u.deg + a1
assert type(a9) is Angle
with pytest.raises(TypeError):
a6 *= a5
with pytest.raises(TypeError):
a6 *= u.m
with pytest.raises(TypeError):
np.sin(a6, out=a6)
def test_angle_convert():
"""
Test unit conversion of Angle objects
"""
angle = Angle("54.12412", unit=u.degree)
assert_allclose(angle.hour, 3.60827466667)
assert_allclose(angle.radian, 0.944644098745)
assert_allclose(angle.degree, 54.12412)
assert len(angle.hms) == 3
assert isinstance(angle.hms, tuple)
assert angle.hms[0] == 3
assert angle.hms[1] == 36
assert_allclose(angle.hms[2], 29.78879999999947)
# also check that the namedtuple attribute-style access works:
assert angle.hms.h == 3
assert angle.hms.m == 36
assert_allclose(angle.hms.s, 29.78879999999947)
assert len(angle.dms) == 3
assert isinstance(angle.dms, tuple)
assert angle.dms[0] == 54
assert angle.dms[1] == 7
assert_allclose(angle.dms[2], 26.831999999992036)
# also check that the namedtuple attribute-style access works:
assert angle.dms.d == 54
assert angle.dms.m == 7
assert_allclose(angle.dms.s, 26.831999999992036)
assert isinstance(angle.dms[0], float)
assert isinstance(angle.hms[0], float)
# now make sure dms and signed_dms work right for negative angles
negangle = Angle("-54.12412", unit=u.degree)
assert negangle.dms.d == -54
assert negangle.dms.m == -7
assert_allclose(negangle.dms.s, -26.831999999992036)
assert negangle.signed_dms.sign == -1
assert negangle.signed_dms.d == 54
assert negangle.signed_dms.m == 7
assert_allclose(negangle.signed_dms.s, 26.831999999992036)
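# Minimal sketch of the namedtuple conversions checked above: the d/m/s
# fields always recombine to the original decimal value (the 30.25 deg
# input is an illustrative assumption).
def test_angle_dms_recombination_sketch():
    a = Angle(30.25, unit=u.degree)
    d, m, s = a.dms
    assert_allclose(d + m / 60. + s / 3600., a.degree)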
def test_angle_formatting():
"""
Tests string formatting for Angle objects
"""
'''
The to_string method of Angle has a signature along these lines:
    def to_string(self, unit=None, decimal=False, sep='fromunit',
                  precision=None, pad=False, ...):
The "decimal" parameter defaults to False since, if you need to print the
Angle as a decimal, there is no need for the sexagesimal machinery at all.
'''
angle = Angle("54.12412", unit=u.degree)
# __str__ is the default `format`
assert str(angle) == angle.to_string()
res = 'Angle as HMS: 3h36m29.7888s'
assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour)) == res
res = 'Angle as HMS: 3:36:29.7888'
assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, sep=":")) == res
res = 'Angle as HMS: 3:36:29.79'
assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, sep=":",
precision=2)) == res
# Note that you can provide one, two, or three separators passed as a
# tuple or list
res = 'Angle as HMS: 3h36m29.7888s'
assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour,
sep=("h", "m", "s"),
precision=4)) == res
res = 'Angle as HMS: 3-36|29.7888'
assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, sep=["-", "|"],
precision=4)) == res
res = 'Angle as HMS: 3-36-29.7888'
assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, sep="-",
precision=4)) == res
res = 'Angle as HMS: 03h36m29.7888s'
assert "Angle as HMS: {0}".format(angle.to_string(unit=u.hour, precision=4,
pad=True)) == res
# Same as above, in degrees
angle = Angle("3 36 29.78880", unit=u.degree)
res = 'Angle as DMS: 3d36m29.7888s'
assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree)) == res
res = 'Angle as DMS: 3:36:29.7888'
assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, sep=":")) == res
res = 'Angle as DMS: 3:36:29.79'
assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, sep=":",
precision=2)) == res
# Note that you can provide one, two, or three separators passed as a
# tuple or list
res = 'Angle as DMS: 3d36m29.7888s'
assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree,
sep=("d", "m", "s"),
precision=4)) == res
res = 'Angle as DMS: 3-36|29.7888'
assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, sep=["-", "|"],
precision=4)) == res
res = 'Angle as DMS: 3-36-29.7888'
assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, sep="-",
precision=4)) == res
res = 'Angle as DMS: 03d36m29.7888s'
assert "Angle as DMS: {0}".format(angle.to_string(unit=u.degree, precision=4,
pad=True)) == res
res = 'Angle as rad: 0.0629763rad'
assert "Angle as rad: {0}".format(angle.to_string(unit=u.radian)) == res
res = 'Angle as rad decimal: 0.0629763'
assert "Angle as rad decimal: {0}".format(angle.to_string(unit=u.radian, decimal=True)) == res
# check negative angles
angle = Angle(-1.23456789, unit=u.degree)
angle2 = Angle(-1.23456789, unit=u.hour)
assert angle.to_string() == '-1d14m04.4444s'
assert angle.to_string(pad=True) == '-01d14m04.4444s'
assert angle.to_string(unit=u.hour) == '-0h04m56.2963s'
assert angle2.to_string(unit=u.hour, pad=True) == '-01h14m04.4444s'
assert angle.to_string(unit=u.radian, decimal=True) == '-0.0215473'
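# Sketch of how unit, sep and precision compose in to_string; the angle
# value (10.5 deg) is an illustrative assumption.
def test_angle_formatting_compose_sketch():
    a = Angle(10.5, unit=u.degree)
    assert a.to_string(sep=':', precision=2) == '10:30:00.00'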
def test_to_string_vector():
# Regression test for the fact that vectorize doesn't work with Numpy 1.6
assert Angle([1./7., 1./7.], unit='deg').to_string()[0] == "0d08m34.2857s"
assert Angle([1./7.], unit='deg').to_string()[0] == "0d08m34.2857s"
assert Angle(1./7., unit='deg').to_string() == "0d08m34.2857s"
def test_angle_format_roundtripping():
"""
Ensures that the string representation of an angle can be used to create a
new valid Angle.
"""
a1 = Angle(0, unit=u.radian)
a2 = Angle(10, unit=u.degree)
a3 = Angle(0.543, unit=u.degree)
a4 = Angle('1d2m3.4s')
assert Angle(str(a1)).degree == a1.degree
assert Angle(str(a2)).degree == a2.degree
assert Angle(str(a3)).degree == a3.degree
assert Angle(str(a4)).degree == a4.degree
# also check Longitude/Latitude
ra = Longitude('1h2m3.4s')
dec = Latitude('1d2m3.4s')
assert_allclose(Angle(str(ra)).degree, ra.degree)
assert_allclose(Angle(str(dec)).degree, dec.degree)
def test_radec():
"""
Tests creation/operations of Longitude and Latitude objects
"""
'''
Longitude and Latitude are objects that are subclassed from Angle. As with Angle, Longitude
and Latitude can parse any unambiguous format (tuples, formatted strings, etc.).
The intention is not to create an Angle subclass for every possible
coordinate object (e.g. galactic l, galactic b). However, equatorial Longitude/Latitude
are so prevalent in astronomy that it's worth having dedicated classes for
them. They will be noted as "special" in the docs, and the plain Angle class
should be used for other coordinate systems.
'''
with pytest.raises(u.UnitsError):
ra = Longitude("4:08:15.162342") # error - hours or degrees?
with pytest.raises(u.UnitsError):
ra = Longitude("-4:08:15.162342")
# the "smart" initializer allows >24 to automatically do degrees, but the
# Angle-based one does not
# TODO: adjust in 0.3 for whatever behavior is decided on
# ra = Longitude("26:34:15.345634") # unambiguous b/c hours don't go past 24
# assert_allclose(ra.degree, 26.570929342)
with pytest.raises(u.UnitsError):
ra = Longitude("26:34:15.345634")
# ra = Longitude(68)
with pytest.raises(u.UnitsError):
ra = Longitude(68)
with pytest.raises(u.UnitsError):
ra = Longitude(12)
with pytest.raises(ValueError):
ra = Longitude("garbage containing a d and no units")
ra = Longitude("12h43m23s")
assert_allclose(ra.hour, 12.7230555556)
ra = Longitude((56, 14, 52.52), unit=u.degree) # can accept tuples
# TODO: again, fix based on >24 behavior
# ra = Longitude((56,14,52.52))
with pytest.raises(u.UnitsError):
ra = Longitude((56, 14, 52.52))
with pytest.raises(u.UnitsError):
ra = Longitude((12, 14, 52)) # ambiguous w/o units
ra = Longitude((12, 14, 52), unit=u.hour)
ra = Longitude([56, 64, 52.2], unit=u.degree) # ...but not arrays (yet)
# Units can be specified
ra = Longitude("4:08:15.162342", unit=u.hour)
# TODO: this was the "smart" initializer behavior - adjust in 0.3 appropriately
# Where Longitude values are commonly found in hours or degrees, declination is
# nearly always specified in degrees, so this is the default.
# dec = Latitude("-41:08:15.162342")
with pytest.raises(u.UnitsError):
dec = Latitude("-41:08:15.162342")
dec = Latitude("-41:08:15.162342", unit=u.degree) # same as above
def test_negative_zero_dms():
# Test for DMS parser
a = Angle('-00:00:10', u.deg)
assert_allclose(a.degree, -10. / 3600.)
# Unicode minus
a = Angle('−00:00:10', u.deg)
assert_allclose(a.degree, -10. / 3600.)
def test_negative_zero_dm():
# Test for DM parser
a = Angle('-00:10', u.deg)
assert_allclose(a.degree, -10. / 60.)
def test_negative_zero_hms():
# Test for HMS parser
a = Angle('-00:00:10', u.hour)
assert_allclose(a.hour, -10. / 3600.)
def test_negative_zero_hm():
# Test for HM parser
a = Angle('-00:10', u.hour)
assert_allclose(a.hour, -10. / 60.)
def test_negative_sixty_hm():
# Test for HM parser
a = Angle('-00:60', u.hour)
assert_allclose(a.hour, -1.)
def test_plus_sixty_hm():
# Test for HM parser
a = Angle('00:60', u.hour)
assert_allclose(a.hour, 1.)
def test_negative_fifty_nine_sixty_dms():
# Test for DMS parser
a = Angle('-00:59:60', u.deg)
assert_allclose(a.degree, -1.)
def test_plus_fifty_nine_sixty_dms():
# Test for DMS parser
a = Angle('+00:59:60', u.deg)
assert_allclose(a.degree, 1.)
def test_negative_sixty_dms():
# Test for DMS parser
a = Angle('-00:00:60', u.deg)
assert_allclose(a.degree, -1. / 60.)
def test_plus_sixty_dms():
# Test for DMS parser
a = Angle('+00:00:60', u.deg)
assert_allclose(a.degree, 1. / 60.)
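# Sketch of the "60 carries into the next field" behaviour exercised by
# the parser tests above.
def test_sixty_carry_sketch():
    assert_allclose(Angle('00:59:60', u.deg).degree, 1.)
    assert_allclose(Angle('-00:59:60', u.deg).degree, -1.)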
def test_angle_to_is_angle():
a = Angle('00:00:60', u.deg)
assert isinstance(a, Angle)
assert isinstance(a.to(u.rad), Angle)
def test_angle_to_quantity():
a = Angle('00:00:60', u.deg)
q = u.Quantity(a)
assert isinstance(q, u.Quantity)
assert q.unit is u.deg
def test_quantity_to_angle():
a = Angle(1.0*u.deg)
assert isinstance(a, Angle)
with pytest.raises(u.UnitsError):
Angle(1.0*u.meter)
a = Angle(1.0*u.hour)
assert isinstance(a, Angle)
assert a.unit is u.hourangle
with pytest.raises(u.UnitsError):
Angle(1.0*u.min)
def test_angle_string():
a = Angle('00:00:60', u.deg)
assert str(a) == '0d01m00s'
a = Angle('-00:00:10', u.hour)
assert str(a) == '-0h00m10s'
a = Angle(3.2, u.radian)
assert str(a) == '3.2rad'
a = Angle(4.2, u.microarcsecond)
assert str(a) == '4.2uarcsec'
a = Angle('1.0uarcsec')
assert a.value == 1.0
assert a.unit == u.microarcsecond
a = Angle("3d")
assert_allclose(a.value, 3.0)
assert a.unit == u.degree
a = Angle('10"')
assert_allclose(a.value, 10.0)
assert a.unit == u.arcsecond
a = Angle("10'")
assert_allclose(a.value, 10.0)
assert a.unit == u.arcminute
def test_angle_repr():
assert 'Angle' in repr(Angle(0, u.deg))
assert 'Longitude' in repr(Longitude(0, u.deg))
assert 'Latitude' in repr(Latitude(0, u.deg))
a = Angle(0, u.deg)
repr(a)
def test_large_angle_representation():
"""Test that angles above 360 degrees can be output as strings,
in repr, str, and to_string. (regression test for #1413)"""
a = Angle(350, u.deg) + Angle(350, u.deg)
a.to_string()
a.to_string(u.hourangle)
repr(a)
repr(a.to(u.hourangle))
str(a)
str(a.to(u.hourangle))
def test_wrap_at_inplace():
a = Angle([-20, 150, 350, 360] * u.deg)
out = a.wrap_at('180d', inplace=True)
assert out is None
assert np.all(a.degree == np.array([-20., 150., -10., 0.]))
def test_latitude():
with pytest.raises(ValueError):
lat = Latitude(['91d', '89d'])
with pytest.raises(ValueError):
lat = Latitude('-91d')
lat = Latitude(['90d', '89d'])
# check that one can get items
assert lat[0] == 90 * u.deg
assert lat[1] == 89 * u.deg
# and that comparison with angles works
assert np.all(lat == Angle(['90d', '89d']))
# check setitem works
lat[1] = 45. * u.deg
assert np.all(lat == Angle(['90d', '45d']))
# but not with values out of range
with pytest.raises(ValueError):
lat[0] = 90.001 * u.deg
with pytest.raises(ValueError):
lat[0] = -90.001 * u.deg
# these should also not destroy input (#1851)
assert np.all(lat == Angle(['90d', '45d']))
# conserve type on unit change (closes #1423)
angle = lat.to('radian')
assert type(angle) is Latitude
# but not on calculations
angle = lat - 190 * u.deg
assert type(angle) is Angle
assert angle[0] == -100 * u.deg
lat = Latitude('80d')
angle = lat / 2.
assert type(angle) is Angle
assert angle == 40 * u.deg
angle = lat * 2.
assert type(angle) is Angle
assert angle == 160 * u.deg
angle = -lat
assert type(angle) is Angle
assert angle == -80 * u.deg
# Test errors when trying to interoperate with longitudes.
with pytest.raises(TypeError) as excinfo:
lon = Longitude(10, 'deg')
lat = Latitude(lon)
assert "A Latitude angle cannot be created from a Longitude angle" in str(excinfo)
with pytest.raises(TypeError) as excinfo:
lon = Longitude(10, 'deg')
lat = Latitude([20], 'deg')
lat[0] = lon
assert "A Longitude angle cannot be assigned to a Latitude angle" in str(excinfo)
# Check we can work around the Lat vs Long checks by casting explicitly to Angle.
lon = Longitude(10, 'deg')
lat = Latitude(Angle(lon))
assert lat.value == 10.0
# Check setitem.
lon = Longitude(10, 'deg')
lat = Latitude([20], 'deg')
lat[0] = Angle(lon)
assert lat.value[0] == 10.0
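# Minimal sketch of the +/-90 deg bounds enforced above: the boundary
# itself is allowed, anything beyond it is rejected.
def test_latitude_bounds_sketch():
    Latitude(-90 * u.deg)  # boundary value is fine
    with pytest.raises(ValueError):
        Latitude(90.0001 * u.deg)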
def test_longitude():
# Default wrapping at 360d with an array input
lon = Longitude(['370d', '88d'])
assert np.all(lon == Longitude(['10d', '88d']))
assert np.all(lon == Angle(['10d', '88d']))
# conserve type on unit change and keep wrap_angle (closes #1423)
angle = lon.to('hourangle')
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
angle = lon[0]
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
angle = lon[1:]
assert type(angle) is Longitude
assert angle.wrap_angle == lon.wrap_angle
# but not on calculations
angle = lon / 2.
assert np.all(angle == Angle(['5d', '44d']))
assert type(angle) is Angle
assert not hasattr(angle, 'wrap_angle')
angle = lon * 2. + 400 * u.deg
assert np.all(angle == Angle(['420d', '576d']))
assert type(angle) is Angle
# Test setting a mutable value and having it wrap
lon[1] = -10 * u.deg
assert np.all(lon == Angle(['10d', '350d']))
# Test wrapping and try hitting some edge cases
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
assert np.all(lon.degree == np.array([0., 90, 180, 270, 0]))
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian, wrap_angle='180d')
assert np.all(lon.degree == np.array([0., 90, -180, -90, 0]))
# Wrap on setting wrap_angle property (also test auto-conversion of wrap_angle to an Angle)
lon = Longitude(np.array([0, 0.5, 1.0, 1.5, 2.0]) * np.pi, unit=u.radian)
lon.wrap_angle = '180d'
assert np.all(lon.degree == np.array([0., 90, -180, -90, 0]))
lon = Longitude('460d')
assert lon == Angle('100d')
lon.wrap_angle = '90d'
assert lon == Angle('-260d')
# check that if we initialize a longitude with another longitude,
# wrap_angle is kept by default
lon2 = Longitude(lon)
assert lon2.wrap_angle == lon.wrap_angle
# but not if we explicitly set it
lon3 = Longitude(lon, wrap_angle='180d')
assert lon3.wrap_angle == 180 * u.deg
# check for problem reported in #2037 about Longitude initializing to -0
lon = Longitude(0, u.deg)
lonstr = lon.to_string()
assert not lonstr.startswith('-')
# also make sure dtype is correctly conserved
assert Longitude(0, u.deg, dtype=float).dtype == np.dtype(float)
assert Longitude(0, u.deg, dtype=int).dtype == np.dtype(int)
# Test errors when trying to interoperate with latitudes.
with pytest.raises(TypeError) as excinfo:
lat = Latitude(10, 'deg')
lon = Longitude(lat)
assert "A Longitude angle cannot be created from a Latitude angle" in str(excinfo)
with pytest.raises(TypeError) as excinfo:
lat = Latitude(10, 'deg')
lon = Longitude([20], 'deg')
lon[0] = lat
assert "A Latitude angle cannot be assigned to a Longitude angle" in str(excinfo)
# Check we can work around the Lat vs Long checks by casting explicitly to Angle.
lat = Latitude(10, 'deg')
lon = Longitude(Angle(lat))
assert lon.value == 10.0
# Check setitem.
lat = Latitude(10, 'deg')
lon = Longitude([20], 'deg')
lon[0] = Angle(lat)
assert lon.value[0] == 10.0
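# Sketch: wrapping maps any input into [wrap_angle - 360 deg, wrap_angle),
# as exercised above; the inputs here are illustrative assumptions.
def test_longitude_wrap_interval_sketch():
    assert Longitude(370 * u.deg) == Angle('10d')     # default wrap at 360d
    assert Longitude(270 * u.deg, wrap_angle=180 * u.deg) == Angle('-90d')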
def test_wrap_at():
a = Angle([-20, 150, 350, 360] * u.deg)
assert np.all(a.wrap_at(360 * u.deg).degree == np.array([340., 150., 350., 0.]))
assert np.all(a.wrap_at(Angle(360, unit=u.deg)).degree == np.array([340., 150., 350., 0.]))
assert np.all(a.wrap_at('360d').degree == np.array([340., 150., 350., 0.]))
assert np.all(a.wrap_at('180d').degree == np.array([-20., 150., -10., 0.]))
assert np.all(a.wrap_at(np.pi * u.rad).degree == np.array([-20., 150., -10., 0.]))
# Test wrapping a scalar Angle
a = Angle('190d')
assert a.wrap_at('180d') == Angle('-170d')
a = Angle(np.arange(-1000.0, 1000.0, 0.125), unit=u.deg)
for wrap_angle in (270, 0.2, 0.0, 360.0, 500, -2000.125):
aw = a.wrap_at(wrap_angle * u.deg)
assert np.all(aw.degree >= wrap_angle - 360.0)
assert np.all(aw.degree < wrap_angle)
aw = a.to(u.rad).wrap_at(wrap_angle * u.deg)
assert np.all(aw.degree >= wrap_angle - 360.0)
assert np.all(aw.degree < wrap_angle)
def test_is_within_bounds():
a = Angle([-20, 150, 350] * u.deg)
assert a.is_within_bounds('0d', '360d') is False
assert a.is_within_bounds(None, '360d') is True
assert a.is_within_bounds(-30 * u.deg, None) is True
a = Angle('-20d')
assert a.is_within_bounds('0d', '360d') is False
assert a.is_within_bounds(None, '360d') is True
assert a.is_within_bounds(-30 * u.deg, None) is True
def test_angle_mismatched_unit():
a = Angle('+6h7m8s', unit=u.degree)
assert_allclose(a.value, 91.78333333333332)
def test_regression_formatting_negative():
# Regression test for a bug that caused:
#
# >>> Angle(-1., unit='deg').to_string()
# '-1d00m-0s'
assert Angle(-0., unit='deg').to_string() == '-0d00m00s'
assert Angle(-1., unit='deg').to_string() == '-1d00m00s'
assert Angle(-0., unit='hour').to_string() == '-0h00m00s'
assert Angle(-1., unit='hour').to_string() == '-1h00m00s'
def test_empty_sep():
a = Angle('05h04m31.93830s')
assert a.to_string(sep='', precision=2, pad=True) == '050431.94'
def test_create_tuple():
"""
Tests creation of an angle with a (d,m,s) or (h,m,s) tuple
"""
a1 = Angle((1, 30, 0), unit=u.degree)
assert a1.value == 1.5
a1 = Angle((1, 30, 0), unit=u.hourangle)
assert a1.value == 1.5
def test_list_of_quantities():
a1 = Angle([1*u.deg, 1*u.hourangle])
assert a1.unit == u.deg
assert_allclose(a1.value, [1, 15])
a2 = Angle([1*u.hourangle, 1*u.deg], u.deg)
assert a2.unit == u.deg
assert_allclose(a2.value, [15, 1])
def test_multiply_divide():
# Issue #2273
a1 = Angle([1, 2, 3], u.deg)
a2 = Angle([4, 5, 6], u.deg)
a3 = a1 * a2
assert_allclose(a3.value, [4, 10, 18])
assert a3.unit == (u.deg * u.deg)
a3 = a1 / a2
assert_allclose(a3.value, [.25, .4, .5])
assert a3.unit == u.dimensionless_unscaled
def test_mixed_string_and_quantity():
a1 = Angle(['1d', 1. * u.deg])
assert_array_equal(a1.value, [1., 1.])
assert a1.unit == u.deg
a2 = Angle(['1d', 1 * u.rad * np.pi, '3d'])
assert_array_equal(a2.value, [1., 180., 3.])
assert a2.unit == u.deg
def test_array_angle_tostring():
aobj = Angle([1, 2], u.deg)
assert aobj.to_string().dtype.kind == 'U'
assert np.all(aobj.to_string() == ['1d00m00s', '2d00m00s'])
def test_wrap_at_without_new():
"""
Regression test for subtle bugs from situations where an Angle is
created via numpy channels that don't go through the standard __new__ but
instead depend on __array_finalize__ to set state. Longitude is used because
the bug was in its _wrap_angle not getting initialized correctly
"""
l1 = Longitude([1]*u.deg)
l2 = Longitude([2]*u.deg)
l = np.concatenate([l1, l2])
assert l._wrap_angle is not None
def test__str__():
"""
Check the __str__ method used in printing the Angle
"""
# scalar angle
scangle = Angle('10.2345d')
strscangle = scangle.__str__()
assert strscangle == '10d14m04.2s'
# non-scalar array angles
arrangle = Angle(['10.2345d', '-20d'])
strarrangle = arrangle.__str__()
assert strarrangle == '[10d14m04.2s -20d00m00s]'
# summarizing for large arrays, ... should appear
bigarrangle = Angle(np.ones(10000), u.deg)
assert '...' in bigarrangle.__str__()
def test_repr_latex():
"""
Check the _repr_latex_ method, used primarily by IPython notebooks
"""
# try with both scalar
scangle = Angle(2.1, u.deg)
rlscangle = scangle._repr_latex_()
# and array angles
arrangle = Angle([1, 2.1], u.deg)
rlarrangle = arrangle._repr_latex_()
assert rlscangle == r'$2^\circ06{}^\prime00{}^{\prime\prime}$'
assert rlscangle.split('$')[1] in rlarrangle
# make sure the ... appears for large arrays
bigarrangle = Angle(np.ones(50000)/50000., u.deg)
assert '...' in bigarrangle._repr_latex_()
def test_angle_with_cds_units_enabled():
"""Regression test for #5350
Especially the example in
https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
"""
from ...units import cds
# the problem is with the parser, so remove it temporarily
from ..angle_utilities import _AngleParser
del _AngleParser._parser
with cds.enable():
Angle('5d')
del _AngleParser._parser
Angle('5d')
|
2f27a66a053b16cccdd14d4e5c85dcf055fcce263ab66110cc7e840e33b9743f | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for putting velocity differentials into SkyCoord objects.
Note: the skyoffset velocity tests are in a different file, in
test_skyoffset_transformations.py
"""
import pytest
import numpy as np
from ... import units as u
from ...tests.helper import assert_quantity_allclose
from .. import (SkyCoord, ICRS, SphericalRepresentation, SphericalDifferential,
SphericalCosLatDifferential, CartesianRepresentation,
CartesianDifferential, Galactic, PrecessedGeocentric)
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
def test_creation_frameobjs():
i = ICRS(1*u.deg, 2*u.deg, pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr)
sc = SkyCoord(i)
for attrnm in ['ra', 'dec', 'pm_ra_cosdec', 'pm_dec']:
assert_quantity_allclose(getattr(i, attrnm), getattr(sc, attrnm))
sc_nod = SkyCoord(ICRS(1*u.deg, 2*u.deg))
for attrnm in ['ra', 'dec']:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_nod, attrnm))
def test_creation_attrs():
sc1 = SkyCoord(1*u.deg, 2*u.deg,
pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr,
frame='fk5')
assert_quantity_allclose(sc1.ra, 1*u.deg)
assert_quantity_allclose(sc1.dec, 2*u.deg)
assert_quantity_allclose(sc1.pm_ra_cosdec, .2*u.arcsec/u.kyr)
assert_quantity_allclose(sc1.pm_dec, .1*u.arcsec/u.kyr)
sc2 = SkyCoord(1*u.deg, 2*u.deg,
pm_ra=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr,
differential_type=SphericalDifferential)
assert_quantity_allclose(sc2.ra, 1*u.deg)
assert_quantity_allclose(sc2.dec, 2*u.deg)
assert_quantity_allclose(sc2.pm_ra, .2*u.arcsec/u.kyr)
assert_quantity_allclose(sc2.pm_dec, .1*u.arcsec/u.kyr)
sc3 = SkyCoord('1:2:3 4:5:6',
pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr,
unit=(u.hour, u.deg))
assert_quantity_allclose(sc3.ra, 1*u.hourangle + 2*u.arcmin*15 + 3*u.arcsec*15)
assert_quantity_allclose(sc3.dec, 4*u.deg + 5*u.arcmin + 6*u.arcsec)
# also sanity-check conversion to less conventional units
assert_quantity_allclose(sc3.pm_ra_cosdec, 1.2776637006616473e-07 * u.arcmin / u.fortnight)
assert_quantity_allclose(sc3.pm_dec, 6.388318503308237e-08 * u.arcmin / u.fortnight)
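# Sketch (not part of the original suite): the proper-motion keywords
# accept any angular-rate unit, since 0.2 mas/yr and 200 uas/yr are the
# same physical quantity.
def test_creation_pm_unit_equivalence_sketch():
    sc = SkyCoord(1*u.deg, 2*u.deg,
                  pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr)
    assert_quantity_allclose(sc.pm_ra_cosdec, 200*u.uas/u.yr)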
def test_creation_copy_basic():
i = ICRS(1*u.deg, 2*u.deg, pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr)
sc = SkyCoord(i)
sc_cpy = SkyCoord(sc)
for attrnm in ['ra', 'dec', 'pm_ra_cosdec', 'pm_dec']:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_cpy, attrnm))
def test_creation_copy_rediff():
sc = SkyCoord(1*u.deg, 2*u.deg,
pm_ra=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr,
differential_type=SphericalDifferential)
sc_cpy = SkyCoord(sc)
for attrnm in ['ra', 'dec', 'pm_ra', 'pm_dec']:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_cpy, attrnm))
sc_newdiff = SkyCoord(sc, differential_type=SphericalCosLatDifferential)
reprepr = sc.represent_as(SphericalRepresentation, SphericalCosLatDifferential)
assert_quantity_allclose(sc_newdiff.pm_ra_cosdec,
reprepr.differentials['s'].d_lon_coslat)
def test_creation_cartesian():
rep = CartesianRepresentation([10, 0., 0.]*u.pc)
dif = CartesianDifferential([0, 100, 0.]*u.pc/u.Myr)
rep = rep.with_differentials(dif)
c = SkyCoord(rep)
sdif = dif.represent_as(SphericalCosLatDifferential, rep)
assert_quantity_allclose(c.pm_ra_cosdec, sdif.d_lon_coslat)
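# Sketch: a velocity perpendicular to the line of sight shows up purely as
# proper motion, with zero radial velocity (the values are illustrative).
def test_cartesian_tangential_velocity_sketch():
    rep = CartesianRepresentation([10., 0., 0.] * u.pc)
    dif = CartesianDifferential([0., 0., 10.] * u.pc / u.Myr)
    c = SkyCoord(rep.with_differentials(dif))
    assert_quantity_allclose(c.radial_velocity, 0 * u.km / u.s,
                             atol=1e-10 * u.km / u.s)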
def test_useful_error_missing():
sc_nod = SkyCoord(ICRS(1*u.deg, 2*u.deg))
try:
sc_nod.l
except AttributeError as e:
# this is double-checking the *normal* behavior
msg_l = e.args[0]
try:
sc_nod.pm_dec
except Exception as e:
msg_pm_dec = e.args[0]
assert "has no attribute" in msg_l
assert "has no associated differentials" in msg_pm_dec
# ----------------------Operations on SkyCoords w/ velocities-------------------
# define some fixtures to get baseline coordinates to try operations with
@pytest.fixture(scope="module", params=[(False, False),
(True, False),
(False, True),
(True, True)])
def sc(request):
incldist, inclrv = request.param
args = [1*u.deg, 2*u.deg]
kwargs = dict(pm_dec=1*u.mas/u.yr, pm_ra_cosdec=2*u.mas/u.yr)
if incldist:
kwargs['distance'] = 213.4*u.pc
if inclrv:
kwargs['radial_velocity'] = 61*u.km/u.s
return SkyCoord(*args, **kwargs)
@pytest.fixture(scope="module")
def scmany():
return SkyCoord(ICRS(ra=[1]*100*u.deg, dec=[2]*100*u.deg,
pm_ra_cosdec=np.random.randn(100)*u.mas/u.yr,
pm_dec=np.random.randn(100)*u.mas/u.yr,))
@pytest.fixture(scope="module")
def sc_for_sep():
return SkyCoord(1*u.deg, 2*u.deg,
pm_dec=1*u.mas/u.yr, pm_ra_cosdec=2*u.mas/u.yr)
def test_separation(sc, sc_for_sep):
sc.separation(sc_for_sep)
def test_accessors(sc, scmany):
sc.data.differentials['s']
sph = sc.spherical
gal = sc.galactic
if (sc.data.get_name().startswith('unit') and not
sc.data.differentials['s'].get_name().startswith('unit')):
# this xfail can be eliminated when issue #7028 is resolved
pytest.xfail('.velocity fails if there is an RV but not distance')
sc.velocity
assert isinstance(sph, SphericalRepresentation)
assert gal.data.differentials is not None
scmany[0]
sph = scmany.spherical
gal = scmany.galactic
assert isinstance(sph, SphericalRepresentation)
assert gal.data.differentials is not None
def test_transforms(sc):
trans = sc.transform_to('galactic')
assert isinstance(trans.frame, Galactic)
def test_transforms_diff(sc):
# note that arguably this *should* fail for the no-distance cases: 3D
# information is necessary to truly solve this, hence the xfail
if not sc.distance.unit.is_equivalent(u.m):
pytest.xfail('Should fail for no-distance cases')
else:
trans = sc.transform_to(PrecessedGeocentric(equinox='B1975'))
assert isinstance(trans.frame, PrecessedGeocentric)
@pytest.mark.skipif(str('not HAS_SCIPY'))
def test_matching(sc, scmany):
# just check that it works and yields something
idx, d2d, d3d = sc.match_to_catalog_sky(scmany)
def test_position_angle(sc, sc_for_sep):
sc.position_angle(sc_for_sep)
def test_constellations(sc):
const = sc.get_constellation()
assert const == 'Pisces'
|
4c87a92fddc04ad12f5610971f63be352a734b0c30f02688788de84ff3a39965 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from ... import units as u
from ..matrix_utilities import rotation_matrix, angle_axis
def test_rotation_matrix():
assert_array_equal(rotation_matrix(0*u.deg, 'x'), np.eye(3))
assert_allclose(rotation_matrix(90*u.deg, 'y'), [[0, 0, -1],
[0, 1, 0],
[1, 0, 0]], atol=1e-12)
assert_allclose(rotation_matrix(-90*u.deg, 'z'), [[0, -1, 0],
[1, 0, 0],
[0, 0, 1]], atol=1e-12)
assert_allclose(rotation_matrix(45*u.deg, 'x'),
rotation_matrix(45*u.deg, [1, 0, 0]))
assert_allclose(rotation_matrix(125*u.deg, 'y'),
rotation_matrix(125*u.deg, [0, 1, 0]))
assert_allclose(rotation_matrix(-30*u.deg, 'z'),
rotation_matrix(-30*u.deg, [0, 0, 1]))
assert_allclose(np.dot(rotation_matrix(180*u.deg, [1, 1, 0]), [1, 0, 0]),
[0, 1, 0], atol=1e-12)
# make sure it also works for very small angles
assert_allclose(rotation_matrix(0.000001*u.deg, 'x'),
rotation_matrix(0.000001*u.deg, [1, 0, 0]))
def test_angle_axis():
m1 = rotation_matrix(35*u.deg, 'x')
an1, ax1 = angle_axis(m1)
assert abs(an1 - 35*u.deg) < 1e-10*u.deg
assert_allclose(ax1, [1, 0, 0])
m2 = rotation_matrix(-89*u.deg, [1, 1, 0])
an2, ax2 = angle_axis(m2)
assert abs(an2 - 89*u.deg) < 1e-10*u.deg
assert_allclose(ax2, [-2**-0.5, -2**-0.5, 0])
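# Sketch: rotation matrices are orthogonal, so composing a rotation with
# its inverse angle yields the identity (37 deg is an arbitrary choice).
def test_rotation_matrix_inverse_sketch():
    m = np.dot(rotation_matrix(37 * u.deg, 'z'),
               rotation_matrix(-37 * u.deg, 'z'))
    assert_allclose(m, np.eye(3), atol=1e-12)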
|
b5cc71779d590d61e219e59ffd1e76a20ee7a380c133ea356cd34bab0628f524 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
from collections import OrderedDict
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ... import units as u
from ...tests.helper import (assert_quantity_allclose as
assert_allclose_quantity, catch_warnings)
from ...utils import isiterable
from ...utils.compat import NUMPY_LT_1_14
from ...utils.exceptions import AstropyDeprecationWarning
from ..angles import Longitude, Latitude, Angle
from ..distances import Distance
from ..representation import (REPRESENTATION_CLASSES,
DIFFERENTIAL_CLASSES,
BaseRepresentation,
SphericalRepresentation,
UnitSphericalRepresentation,
SphericalCosLatDifferential,
CartesianRepresentation,
CylindricalRepresentation,
PhysicsSphericalRepresentation,
CartesianDifferential,
SphericalDifferential,
_combine_xyz)
# Preserve the original REPRESENTATION_CLASSES dict so that importing
# the test file doesn't add a persistent test subclass (LogDRepresentation)
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
class TestSphericalRepresentation:
def test_name(self):
assert SphericalRepresentation.get_name() == 'spherical'
assert SphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = SphericalRepresentation()
def test_init_quantity(self):
s3 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc)
assert s3.lon == 8. * u.hourangle
assert s3.lat == 5. * u.deg
assert s3.distance == 10 * u.kpc
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
assert isinstance(s3.distance, Distance)
def test_init_lonlat(self):
s2 = SphericalRepresentation(Longitude(8, u.hour),
Latitude(5, u.deg),
Distance(10, u.kpc))
assert s2.lon == 8. * u.hourangle
assert s2.lat == 5. * u.deg
assert s2.distance == 10. * u.kpc
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
assert isinstance(s2.distance, Distance)
# also test that wrap_angle is preserved
s3 = SphericalRepresentation(Longitude(-90, u.degree,
wrap_angle=180*u.degree),
Latitude(-45, u.degree),
Distance(1., u.Rsun))
assert s3.lon == -90. * u.degree
assert s3.lon.wrap_angle == 180 * u.degree
def test_init_array(self):
s1 = SphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg,
distance=[1, 2] * u.kpc)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert_allclose(s1.distance.kpc, [1, 2])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
assert isinstance(s1.distance, Distance)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
distance = Distance([1, 2] * u.kpc)
s1 = SphericalRepresentation(lon=lon, lat=lat, distance=distance, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
distance[:] = [8, 9] * u.Mpc
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
assert_allclose_quantity(distance, s1.distance)
def test_init_float32_array(self):
"""Regression test against #2983"""
lon = Longitude(np.float32([1., 2.]), u.degree)
lat = Latitude(np.float32([3., 4.]), u.degree)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
assert s1.lon.dtype == np.float32
assert s1.lat.dtype == np.float32
assert s1._values['lon'].dtype == np.float32
assert s1._values['lat'].dtype == np.float32
def test_reprobj(self):
s1 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc)
s2 = SphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8. * u.hourangle)
assert_allclose_quantity(s2.lat, 5. * u.deg)
assert_allclose_quantity(s2.distance, 10 * u.kpc)
def test_broadcasting(self):
s1 = SphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg,
distance=10 * u.kpc)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
assert_allclose_quantity(s1.distance, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = SphericalRepresentation(lon=[8, 9, 10] * u.hourangle,
lat=[5, 6] * u.deg,
distance=[1, 2] * u.kpc)
assert exc.value.args[0] == "Input parameters lon, lat, and distance cannot be broadcast"
def test_readonly(self):
s1 = SphericalRepresentation(lon=8 * u.hourangle,
lat=5 * u.deg,
distance=1. * u.kpc)
with pytest.raises(AttributeError):
s1.lon = 1. * u.deg
with pytest.raises(AttributeError):
s1.lat = 1. * u.deg
with pytest.raises(AttributeError):
s1.distance = 1. * u.kpc
def test_getitem_len_iterable(self):
s = SphericalRepresentation(lon=np.arange(10) * u.deg,
lat=-np.arange(10) * u.deg,
distance=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.distance, [1, 1, 1] * u.kpc)
assert len(s) == 10
assert isiterable(s)
def test_getitem_len_iterable_scalar(self):
s = SphericalRepresentation(lon=1 * u.deg,
lat=-2 * u.deg,
distance=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
with pytest.raises(TypeError):
len(s)
assert not isiterable(s)
class TestUnitSphericalRepresentation:
def test_name(self):
assert UnitSphericalRepresentation.get_name() == 'unitspherical'
assert UnitSphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = UnitSphericalRepresentation()
def test_init_quantity(self):
s3 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
assert s3.lon == 8. * u.hourangle
assert s3.lat == 5. * u.deg
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
def test_init_lonlat(self):
s2 = UnitSphericalRepresentation(Longitude(8, u.hour),
Latitude(5, u.deg))
assert s2.lon == 8. * u.hourangle
assert s2.lat == 5. * u.deg
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
def test_init_array(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
def test_reprobj(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
s2 = UnitSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8. * u.hourangle)
assert_allclose_quantity(s2.lat, 5. * u.deg)
def test_broadcasting(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = UnitSphericalRepresentation(lon=[8, 9, 10] * u.hourangle,
lat=[5, 6] * u.deg)
assert exc.value.args[0] == "Input parameters lon and lat cannot be broadcast"
def test_readonly(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle,
lat=5 * u.deg)
with pytest.raises(AttributeError):
s1.lon = 1. * u.deg
with pytest.raises(AttributeError):
s1.lat = 1. * u.deg
def test_getitem(self):
s = UnitSphericalRepresentation(lon=np.arange(10) * u.deg,
lat=-np.arange(10) * u.deg)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
def test_getitem_scalar(self):
s = UnitSphericalRepresentation(lon=1 * u.deg,
lat=-2 * u.deg)
with pytest.raises(TypeError):
s_slc = s[0]
class TestPhysicsSphericalRepresentation:
def test_name(self):
assert PhysicsSphericalRepresentation.get_name() == 'physicsspherical'
assert PhysicsSphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = PhysicsSphericalRepresentation()
def test_init_quantity(self):
s3 = PhysicsSphericalRepresentation(phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc)
assert s3.phi == 8. * u.hourangle
assert s3.theta == 5. * u.deg
assert s3.r == 10 * u.kpc
assert isinstance(s3.phi, Angle)
assert isinstance(s3.theta, Angle)
assert isinstance(s3.r, Distance)
def test_init_phitheta(self):
s2 = PhysicsSphericalRepresentation(Angle(8, u.hour),
Angle(5, u.deg),
Distance(10, u.kpc))
assert s2.phi == 8. * u.hourangle
assert s2.theta == 5. * u.deg
assert s2.r == 10. * u.kpc
assert isinstance(s2.phi, Angle)
assert isinstance(s2.theta, Angle)
assert isinstance(s2.r, Distance)
def test_init_array(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=[1, 2] * u.kpc)
assert_allclose(s1.phi.degree, [120, 135])
assert_allclose(s1.theta.degree, [5, 6])
assert_allclose(s1.r.kpc, [1, 2])
assert isinstance(s1.phi, Angle)
assert isinstance(s1.theta, Angle)
assert isinstance(s1.r, Distance)
def test_init_array_nocopy(self):
phi = Angle([8, 9] * u.hourangle)
theta = Angle([5, 6] * u.deg)
r = Distance([1, 2] * u.kpc)
s1 = PhysicsSphericalRepresentation(phi=phi, theta=theta, r=r, copy=False)
phi[:] = [1, 2] * u.rad
theta[:] = [3, 4] * u.arcmin
r[:] = [8, 9] * u.Mpc
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(theta, s1.theta)
assert_allclose_quantity(r, s1.r)
def test_reprobj(self):
s1 = PhysicsSphericalRepresentation(phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.phi, 8. * u.hourangle)
assert_allclose_quantity(s2.theta, 5. * u.deg)
assert_allclose_quantity(s2.r, 10 * u.kpc)
def test_broadcasting(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=10 * u.kpc)
assert_allclose_quantity(s1.phi, [120, 135] * u.degree)
assert_allclose_quantity(s1.theta, [5, 6] * u.degree)
assert_allclose_quantity(s1.r, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = PhysicsSphericalRepresentation(phi=[8, 9, 10] * u.hourangle,
theta=[5, 6] * u.deg,
r=[1, 2] * u.kpc)
assert exc.value.args[0] == "Input parameters phi, theta, and r cannot be broadcast"
def test_readonly(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=[10, 20] * u.kpc)
with pytest.raises(AttributeError):
s1.phi = 1. * u.deg
with pytest.raises(AttributeError):
s1.theta = 1. * u.deg
with pytest.raises(AttributeError):
s1.r = 1. * u.kpc
def test_getitem(self):
s = PhysicsSphericalRepresentation(phi=np.arange(10) * u.deg,
theta=np.arange(5, 15) * u.deg,
r=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.phi, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.theta, [7, 9, 11] * u.deg)
assert_allclose_quantity(s_slc.r, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = PhysicsSphericalRepresentation(phi=1 * u.deg,
theta=2 * u.deg,
r=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
class TestCartesianRepresentation:
def test_name(self):
assert CartesianRepresentation.get_name() == 'cartesian'
assert CartesianRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CartesianRepresentation()
def test_init_quantity(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_singleunit(self):
s1 = CartesianRepresentation(x=1, y=2, z=3, unit=u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc,
y=[2, 3, 4] * u.Mpc,
z=[3, 4, 5] * u.kpc)
assert s1.x.unit is u.pc
assert s1.y.unit is u.Mpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, [1, 2, 3])
assert_allclose(s1.y.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_one_array(self):
s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc)
assert s1.x.unit is u.pc
assert s1.y.unit is u.pc
assert s1.z.unit is u.pc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
r = np.arange(27.).reshape(3, 3, 3) * u.kpc
s2 = CartesianRepresentation(r, xyz_axis=0)
assert s2.shape == (3, 3)
assert s2.x.unit == u.kpc
assert np.all(s2.x == r[0])
assert np.all(s2.xyz == r)
assert np.all(s2.get_xyz(xyz_axis=0) == r)
s3 = CartesianRepresentation(r, xyz_axis=1)
assert s3.shape == (3, 3)
assert np.all(s3.x == r[:, 0])
assert np.all(s3.y == r[:, 1])
assert np.all(s3.z == r[:, 2])
assert np.all(s3.get_xyz(xyz_axis=1) == r)
s4 = CartesianRepresentation(r, xyz_axis=2)
assert s4.shape == (3, 3)
assert np.all(s4.x == r[:, :, 0])
assert np.all(s4.get_xyz(xyz_axis=2) == r)
s5 = CartesianRepresentation(r, unit=u.pc)
assert s5.x.unit == u.pc
assert np.all(s5.xyz == r)
s6 = CartesianRepresentation(r.value, unit=u.pc, xyz_axis=2)
assert s6.x.unit == u.pc
assert np.all(s6.get_xyz(xyz_axis=2).value == r.value)
def test_init_one_array_size_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3, 4] * u.pc)
assert exc.value.args[0].startswith("too many values to unpack")
def test_init_xyz_but_more_than_one_array_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.pc,
z=[3, 4, 5] * u.pc, xyz_axis=0)
assert 'xyz_axis should only be set' in str(exc)
def test_init_one_array_yz_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3, 4] * u.pc, y=[1, 2] * u.pc)
assert exc.value.args[0] == ("x, y, and z are required to instantiate "
"CartesianRepresentation")
def test_init_array_nocopy(self):
x = [8, 9, 10] * u.pc
y = [5, 6, 7] * u.Mpc
z = [2, 3, 4] * u.kpc
s1 = CartesianRepresentation(x=x, y=y, z=z, copy=False)
x[:] = [1, 2, 3] * u.kpc
y[:] = [9, 9, 8] * u.kpc
z[:] = [1, 2, 1] * u.kpc
assert_allclose_quantity(x, s1.x)
assert_allclose_quantity(y, s1.y)
assert_allclose_quantity(z, s1.z)
def test_reprobj(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
s2 = CartesianRepresentation.from_representation(s1)
assert s2.x == 1 * u.kpc
assert s2.y == 2 * u.kpc
assert s2.z == 3 * u.kpc
def test_broadcasting(self):
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=5 * u.kpc)
assert s1.x.unit == u.kpc
assert s1.y.unit == u.kpc
assert s1.z.unit == u.kpc
assert_allclose(s1.x.value, [1, 2])
assert_allclose(s1.y.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6, 7] * u.kpc)
assert exc.value.args[0] == "Input parameters x, y, and z cannot be broadcast"
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.x = 1. * u.kpc
with pytest.raises(AttributeError):
s1.y = 1. * u.kpc
with pytest.raises(AttributeError):
s1.z = 1. * u.kpc
def test_xyz(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert isinstance(s1.xyz, u.Quantity)
assert s1.xyz.unit is u.kpc
assert_allclose(s1.xyz.value, [1, 2, 3])
def test_unit_mismatch(self):
q_len = u.Quantity([1], u.km)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_nonlen, y=q_len, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_nonlen, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_len, z=q_nonlen)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
def test_unit_non_length(self):
s1 = CartesianRepresentation(x=1 * u.kg, y=2 * u.kg, z=3 * u.kg)
s2 = CartesianRepresentation(x=1 * u.km / u.s, y=2 * u.km / u.s, z=3 * u.km / u.s)
banana = u.def_unit('banana')
s3 = CartesianRepresentation(x=1 * banana, y=2 * banana, z=3 * banana)
def test_getitem(self):
s = CartesianRepresentation(x=np.arange(10) * u.m,
y=-np.arange(10) * u.m,
z=3 * u.km)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
def test_getitem_scalar(self):
s = CartesianRepresentation(x=1 * u.m,
y=-2 * u.m,
z=3 * u.km)
with pytest.raises(TypeError):
s_slc = s[0]
def test_transform(self):
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6] * u.kpc)
matrix = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
s2 = s1.transform(matrix)
assert_allclose(s2.x.value, [1 * 1 + 2 * 3 + 3 * 5, 1 * 2 + 2 * 4 + 3 * 6])
assert_allclose(s2.y.value, [4 * 1 + 5 * 3 + 6 * 5, 4 * 2 + 5 * 4 + 6 * 6])
assert_allclose(s2.z.value, [7 * 1 + 8 * 3 + 9 * 5, 7 * 2 + 8 * 4 + 9 * 6])
assert s2.x.unit is u.kpc
assert s2.y.unit is u.kpc
assert s2.z.unit is u.kpc
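# Illustrative sketch (not in the original suite): transforming by the
# identity matrix leaves a CartesianRepresentation unchanged.
def test_cartesian_transform_identity_sketch():
    s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
    s2 = s1.transform(np.eye(3))
    assert_allclose_quantity(s1.xyz, s2.xyz)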
class TestCylindricalRepresentation:
def test_name(self):
assert CylindricalRepresentation.get_name() == 'cylindrical'
assert CylindricalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CylindricalRepresentation()
def test_init_quantity(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
assert s1.rho.unit is u.kpc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, 1)
assert_allclose(s1.phi.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CylindricalRepresentation(rho=[1, 2, 3] * u.pc,
phi=[2, 3, 4] * u.deg,
z=[3, 4, 5] * u.kpc)
assert s1.rho.unit is u.pc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, [1, 2, 3])
assert_allclose(s1.phi.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_array_nocopy(self):
rho = [8, 9, 10] * u.pc
phi = [5, 6, 7] * u.deg
z = [2, 3, 4] * u.kpc
s1 = CylindricalRepresentation(rho=rho, phi=phi, z=z, copy=False)
rho[:] = [9, 2, 3] * u.kpc
phi[:] = [1, 2, 3] * u.arcmin
z[:] = [-2, 3, 8] * u.kpc
assert_allclose_quantity(rho, s1.rho)
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(z, s1.z)
def test_reprobj(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
s2 = CylindricalRepresentation.from_representation(s1)
assert s2.rho == 1 * u.kpc
assert s2.phi == 2 * u.deg
assert s2.z == 3 * u.kpc
def test_broadcasting(self):
s1 = CylindricalRepresentation(rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=5 * u.kpc)
assert s1.rho.unit == u.kpc
assert s1.phi.unit == u.deg
assert s1.z.unit == u.kpc
assert_allclose(s1.rho.value, [1, 2])
assert_allclose(s1.phi.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = CylindricalRepresentation(rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=[5, 6, 7] * u.kpc)
assert exc.value.args[0] == "Input parameters rho, phi, and z cannot be broadcast"
def test_readonly(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc,
phi=20 * u.deg,
z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.rho = 1. * u.kpc
with pytest.raises(AttributeError):
s1.phi = 20 * u.deg
with pytest.raises(AttributeError):
s1.z = 1. * u.kpc
def test_unit_mismatch(self):
q_len = u.Quantity([1], u.kpc)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_nonlen, phi=10 * u.deg, z=q_len)
assert exc.value.args[0] == "rho and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_len, phi=10 * u.deg, z=q_nonlen)
assert exc.value.args[0] == "rho and z should have matching physical types"
def test_getitem(self):
s = CylindricalRepresentation(rho=np.arange(10) * u.pc,
phi=-np.arange(10) * u.deg,
z=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.rho, [2, 4, 6] * u.pc)
assert_allclose_quantity(s_slc.phi, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.z, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = CylindricalRepresentation(rho=1 * u.pc,
phi=-2 * u.deg,
z=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
def test_cartesian_spherical_roundtrip():
s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc,
y=[3000., 4.] * u.pc,
z=[5., 6000.] * u.pc)
s2 = SphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = SphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.lon, s4.lon)
assert_allclose_quantity(s2.lat, s4.lat)
assert_allclose_quantity(s2.distance, s4.distance)
def test_cartesian_physics_spherical_roundtrip():
s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc,
y=[3000., 4.] * u.pc,
z=[5., 6000.] * u.pc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
def test_spherical_physics_spherical_roundtrip():
s1 = SphericalRepresentation(lon=3 * u.deg, lat=4 * u.deg, distance=3 * u.kpc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s3.lon)
assert_allclose_quantity(s1.lat, s3.lat)
assert_allclose_quantity(s1.distance, s3.distance)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
assert_allclose_quantity(s1.lon, s4.phi)
assert_allclose_quantity(s1.lat, 90. * u.deg - s4.theta)
assert_allclose_quantity(s1.distance, s4.r)
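# Sketch of the convention relating the two spherical classes, asserted at
# the end of the roundtrip above: lon == phi and lat == 90 deg - theta.
def test_spherical_physics_convention_sketch():
    s = SphericalRepresentation(lon=30 * u.deg, lat=60 * u.deg,
                                distance=1 * u.kpc)
    p = PhysicsSphericalRepresentation.from_representation(s)
    assert_allclose_quantity(p.phi, 30 * u.deg)
    assert_allclose_quantity(p.theta, 30 * u.deg)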
def test_cartesian_cylindrical_roundtrip():
s1 = CartesianRepresentation(x=np.array([1., 2000.]) * u.kpc,
y=np.array([3000., 4.]) * u.pc,
z=np.array([5., 600.]) * u.cm)
s2 = CylindricalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = CylindricalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.rho, s4.rho)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.z, s4.z)
def test_unit_spherical_roundtrip():
s1 = UnitSphericalRepresentation(lon=[10., 30.] * u.deg,
lat=[5., 6.] * u.arcmin)
s2 = CartesianRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = UnitSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s4.lon)
assert_allclose_quantity(s1.lat, s4.lat)
def test_no_unnecessary_copies():
s1 = UnitSphericalRepresentation(lon=[10., 30.] * u.deg,
lat=[5., 6.] * u.arcmin)
s2 = s1.represent_as(UnitSphericalRepresentation)
assert s2 is s1
assert np.may_share_memory(s1.lon, s2.lon)
assert np.may_share_memory(s1.lat, s2.lat)
s3 = s1.represent_as(SphericalRepresentation)
assert np.may_share_memory(s1.lon, s3.lon)
assert np.may_share_memory(s1.lat, s3.lat)
s4 = s1.represent_as(CartesianRepresentation)
s5 = s4.represent_as(CylindricalRepresentation)
assert np.may_share_memory(s5.z, s4.z)
def test_representation_repr():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert repr(r1) == ('<SphericalRepresentation (lon, lat, distance) in (deg, deg, kpc)\n'
' ({})>').format(' 1., 2.5, 1.' if NUMPY_LT_1_14
else '1., 2.5, 1.')
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert repr(r2) == ('<CartesianRepresentation (x, y, z) in kpc\n'
' ({})>').format(' 1., 2., 3.' if NUMPY_LT_1_14
else '1., 2., 3.')
r3 = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc)
if NUMPY_LT_1_14:
assert repr(r3) == ('<CartesianRepresentation (x, y, z) in kpc\n'
' [( 1., 4., 9.), ( 2., 4., 10.), ( 3., 4., 11.)]>')
else:
assert repr(r3) == ('<CartesianRepresentation (x, y, z) in kpc\n'
' [(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)]>')
def test_representation_repr_multi_d():
"""Regression test for #5889."""
cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit='m')
if NUMPY_LT_1_14:
assert repr(cr) == (
'<CartesianRepresentation (x, y, z) in m\n'
' [[( 0., 9., 18.), ( 1., 10., 19.), ( 2., 11., 20.)],\n'
' [( 3., 12., 21.), ( 4., 13., 22.), ( 5., 14., 23.)],\n'
' [( 6., 15., 24.), ( 7., 16., 25.), ( 8., 17., 26.)]]>')
else:
assert repr(cr) == (
'<CartesianRepresentation (x, y, z) in m\n'
' [[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n'
' [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n'
' [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]]>')
# This was broken before.
if NUMPY_LT_1_14:
assert repr(cr.T) == (
'<CartesianRepresentation (x, y, z) in m\n'
' [[( 0., 9., 18.), ( 3., 12., 21.), ( 6., 15., 24.)],\n'
' [( 1., 10., 19.), ( 4., 13., 22.), ( 7., 16., 25.)],\n'
' [( 2., 11., 20.), ( 5., 14., 23.), ( 8., 17., 26.)]]>')
else:
assert repr(cr.T) == (
'<CartesianRepresentation (x, y, z) in m\n'
' [[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n'
' [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n'
' [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]]>')
def test_representation_str():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert str(r1) == ('( 1., 2.5, 1.) (deg, deg, kpc)' if NUMPY_LT_1_14 else
'(1., 2.5, 1.) (deg, deg, kpc)')
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert str(r2) == ('( 1., 2., 3.) kpc' if NUMPY_LT_1_14 else
'(1., 2., 3.) kpc')
r3 = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc)
assert str(r3) == ('[( 1., 4., 9.), ( 2., 4., 10.), ( 3., 4., 11.)] kpc'
if NUMPY_LT_1_14 else
'[(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)] kpc')
def test_representation_str_multi_d():
"""Regression test for #5889."""
cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit='m')
if NUMPY_LT_1_14:
assert str(cr) == (
'[[( 0., 9., 18.), ( 1., 10., 19.), ( 2., 11., 20.)],\n'
' [( 3., 12., 21.), ( 4., 13., 22.), ( 5., 14., 23.)],\n'
' [( 6., 15., 24.), ( 7., 16., 25.), ( 8., 17., 26.)]] m')
else:
assert str(cr) == (
'[[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n'
' [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n'
' [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]] m')
# This was broken before.
if NUMPY_LT_1_14:
assert str(cr.T) == (
'[[( 0., 9., 18.), ( 3., 12., 21.), ( 6., 15., 24.)],\n'
' [( 1., 10., 19.), ( 4., 13., 22.), ( 7., 16., 25.)],\n'
' [( 2., 11., 20.), ( 5., 14., 23.), ( 8., 17., 26.)]] m')
else:
assert str(cr.T) == (
'[[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n'
' [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n'
' [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]] m')
def test_subclass_representation():
from ..builtin_frames import ICRS
class Longitude180(Longitude):
def __new__(cls, angle, unit=None, wrap_angle=180 * u.deg, **kwargs):
self = super().__new__(cls, angle, unit=unit, wrap_angle=wrap_angle,
**kwargs)
return self
class SphericalWrap180Representation(SphericalRepresentation):
attr_classes = OrderedDict([('lon', Longitude180),
('lat', Latitude),
('distance', u.Quantity)])
recommended_units = {'lon': u.deg, 'lat': u.deg}
class ICRSWrap180(ICRS):
frame_specific_representation_info = ICRS._frame_specific_representation_info.copy()
frame_specific_representation_info[SphericalWrap180Representation] = \
frame_specific_representation_info[SphericalRepresentation]
default_representation = SphericalWrap180Representation
c = ICRSWrap180(ra=-1 * u.deg, dec=-2 * u.deg, distance=1 * u.m)
assert c.ra.value == -1
assert c.ra.unit is u.deg
assert c.dec.value == -2
assert c.dec.unit is u.deg
def test_minimal_subclass():
# Basically a check that what we document actually works;
# see doc/coordinates/representations.rst
class LogDRepresentation(BaseRepresentation):
attr_classes = OrderedDict([('lon', Longitude),
('lat', Latitude),
('logd', u.Dex)])
def to_cartesian(self):
d = self.logd.physical
x = d * np.cos(self.lat) * np.cos(self.lon)
y = d * np.cos(self.lat) * np.sin(self.lon)
z = d * np.sin(self.lat)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
lon = np.arctan2(cart.y, cart.x)
lat = np.arctan2(cart.z, s)
return cls(lon=lon, lat=lat, logd=u.Dex(r), copy=False)
ld1 = LogDRepresentation(90.*u.deg, 0.*u.deg, 1.*u.dex(u.kpc))
ld2 = LogDRepresentation(lon=90.*u.deg, lat=0.*u.deg, logd=1.*u.dex(u.kpc))
assert np.all(ld1.lon == ld2.lon)
assert np.all(ld1.lat == ld2.lat)
assert np.all(ld1.logd == ld2.logd)
c = ld1.to_cartesian()
assert_allclose_quantity(c.xyz, [0., 10., 0.] * u.kpc, atol=1.*u.npc)
ld3 = LogDRepresentation.from_cartesian(c)
assert np.all(ld3.lon == ld2.lon)
assert np.all(ld3.lat == ld2.lat)
assert np.all(ld3.logd == ld2.logd)
s = ld1.represent_as(SphericalRepresentation)
assert_allclose_quantity(s.lon, ld1.lon)
assert_allclose_quantity(s.distance, 10.*u.kpc)
assert_allclose_quantity(s.lat, ld1.lat)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), lon=1.*u.deg)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), True, False)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), foo='bar')
with pytest.raises(ValueError):
# check we cannot redefine an existing class.
class LogDRepresentation(BaseRepresentation):
attr_classes = OrderedDict([('lon', Longitude),
('lat', Latitude),
('logr', u.Dex)])
def test_combine_xyz():
x, y, z = np.arange(27).reshape(3, 9) * u.kpc
xyz = _combine_xyz(x, y, z, xyz_axis=0)
assert xyz.shape == (3, 9)
assert np.all(xyz[0] == x)
assert np.all(xyz[1] == y)
assert np.all(xyz[2] == z)
x, y, z = np.arange(27).reshape(3, 3, 3) * u.kpc
xyz = _combine_xyz(x, y, z, xyz_axis=0)
assert xyz.ndim == 3
assert np.all(xyz[0] == x)
assert np.all(xyz[1] == y)
assert np.all(xyz[2] == z)
xyz = _combine_xyz(x, y, z, xyz_axis=1)
assert xyz.ndim == 3
assert np.all(xyz[:, 0] == x)
assert np.all(xyz[:, 1] == y)
assert np.all(xyz[:, 2] == z)
xyz = _combine_xyz(x, y, z, xyz_axis=-1)
assert xyz.ndim == 3
assert np.all(xyz[..., 0] == x)
assert np.all(xyz[..., 1] == y)
assert np.all(xyz[..., 2] == z)
class TestCartesianRepresentationWithDifferential:
def test_init_differential(self):
diff = CartesianDifferential(d_x=1 * u.km/u.s,
d_y=2 * u.km/u.s,
d_z=3 * u.km/u.s)
# Check that a single differential gets turned into a 1-item dict.
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials=diff)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert len(s1.differentials) == 1
assert s1.differentials['s'] is diff
# can also pass in an explicit dictionary
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials={'s': diff})
assert len(s1.differentials) == 1
assert s1.differentials['s'] is diff
# using the wrong key will cause it to fail
with pytest.raises(ValueError):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials={'1 / s2': diff})
# make sure other kwargs are handled properly
s1 = CartesianRepresentation(x=1, y=2, z=3,
differentials=diff, copy=False, unit=u.kpc)
assert len(s1.differentials) == 1
assert s1.differentials['s'] is diff
with pytest.raises(TypeError): # invalid type passed to differentials
CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials='garmonbozia')
# make sure differentials can't accept differentials
with pytest.raises(TypeError):
CartesianDifferential(d_x=1 * u.km/u.s, d_y=2 * u.km/u.s,
d_z=3 * u.km/u.s, differentials=diff)
def test_init_differential_compatible(self):
# TODO: more extensive checking of this
# should fail - representation and differential not compatible
diff = SphericalDifferential(d_lon=1 * u.mas/u.yr,
d_lat=2 * u.mas/u.yr,
d_distance=3 * u.km/u.s)
with pytest.raises(TypeError):
CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials=diff)
# should succeed - representation and differential are compatible
diff = SphericalCosLatDifferential(d_lon_coslat=1 * u.mas/u.yr,
d_lat=2 * u.mas/u.yr,
d_distance=3 * u.km/u.s)
r1 = SphericalRepresentation(lon=15*u.deg, lat=21*u.deg,
distance=1*u.pc,
differentials=diff)
def test_init_differential_multiple_equivalent_keys(self):
d1 = CartesianDifferential(*[1, 2, 3] * u.km/u.s)
d2 = CartesianDifferential(*[4, 5, 6] * u.km/u.s)
# verify that the check against expected_unit rejects passing in two
# different but equivalent keys
with pytest.raises(ValueError):
r1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials={'s': d1, 'yr': d2})
def test_init_array_broadcasting(self):
arr1 = np.arange(8).reshape(4, 2) * u.km/u.s
diff = CartesianDifferential(d_x=arr1, d_y=arr1, d_z=arr1)
# shapes aren't compatible
arr2 = np.arange(27).reshape(3, 9) * u.kpc
with pytest.raises(ValueError):
rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2,
differentials=diff)
arr2 = np.arange(8).reshape(4, 2) * u.kpc
rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2,
differentials=diff)
assert rep.x.unit is u.kpc
assert rep.y.unit is u.kpc
assert rep.z.unit is u.kpc
assert len(rep.differentials) == 1
assert rep.differentials['s'] is diff
assert rep.xyz.shape == rep.differentials['s'].d_xyz.shape
def test_reprobj(self):
# should succeed - representation and differential are compatible
diff = SphericalCosLatDifferential(d_lon_coslat=1 * u.mas/u.yr,
d_lat=2 * u.mas/u.yr,
d_distance=3 * u.km/u.s)
r1 = SphericalRepresentation(lon=15*u.deg, lat=21*u.deg,
distance=1*u.pc,
differentials=diff)
r2 = CartesianRepresentation.from_representation(r1)
assert r2.get_name() == 'cartesian'
assert not r2.differentials
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError): # attribute is not settable
s1.differentials = 'thing'
def test_represent_as(self):
diff = CartesianDifferential(d_x=1 * u.km/u.s,
d_y=2 * u.km/u.s,
d_z=3 * u.km/u.s)
rep1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials=diff)
# Only change the representation, drop the differential
new_rep = rep1.represent_as(SphericalRepresentation)
assert new_rep.get_name() == 'spherical'
assert not new_rep.differentials # dropped
# Pass in separate classes for representation, differential
new_rep = rep1.represent_as(SphericalRepresentation,
SphericalCosLatDifferential)
assert new_rep.get_name() == 'spherical'
assert new_rep.differentials['s'].get_name() == 'sphericalcoslat'
# Pass in a dictionary for the differential classes
new_rep = rep1.represent_as(SphericalRepresentation,
{'s': SphericalCosLatDifferential})
assert new_rep.get_name() == 'spherical'
assert new_rep.differentials['s'].get_name() == 'sphericalcoslat'
# make sure represent_as() passes through the differentials
for name in REPRESENTATION_CLASSES:
if name == 'radial':
# TODO: Converting a CartesianDifferential to a
# RadialDifferential fails, even on `master`
continue
new_rep = rep1.represent_as(REPRESENTATION_CLASSES[name],
DIFFERENTIAL_CLASSES[name])
assert new_rep.get_name() == name
assert len(new_rep.differentials) == 1
assert new_rep.differentials['s'].get_name() == name
with pytest.raises(ValueError) as excinfo:
rep1.represent_as('name')
assert 'use frame object' in str(excinfo.value)
def test_getitem(self):
d = CartesianDifferential(d_x=np.arange(10) * u.m/u.s,
d_y=-np.arange(10) * u.m/u.s,
d_z=1. * u.m/u.s)
s = CartesianRepresentation(x=np.arange(10) * u.m,
y=-np.arange(10) * u.m,
z=3 * u.km,
differentials=d)
s_slc = s[2:8:2]
s_dif = s_slc.differentials['s']
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
assert_allclose_quantity(s_dif.d_x, [2, 4, 6] * u.m/u.s)
assert_allclose_quantity(s_dif.d_y, [-2, -4, -6] * u.m/u.s)
assert_allclose_quantity(s_dif.d_z, [1, 1, 1] * u.m/u.s)
def test_transform(self):
d1 = CartesianDifferential(d_x=[1, 2] * u.km/u.s,
d_y=[3, 4] * u.km/u.s,
d_z=[5, 6] * u.km/u.s)
r1 = CartesianRepresentation(x=[1, 2] * u.kpc,
y=[3, 4] * u.kpc,
z=[5, 6] * u.kpc,
differentials=d1)
matrix = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
r2 = r1.transform(matrix)
d2 = r2.differentials['s']
assert_allclose_quantity(d2.d_x, [22., 28]*u.km/u.s)
assert_allclose_quantity(d2.d_y, [49, 64]*u.km/u.s)
assert_allclose_quantity(d2.d_z, [76, 100.]*u.km/u.s)
def test_with_differentials(self):
# make sure with_differentials correctly creates a new copy with the same
# differential
cr = CartesianRepresentation([1, 2, 3]*u.kpc)
diff = CartesianDifferential([.1, .2, .3]*u.km/u.s)
cr2 = cr.with_differentials(diff)
assert cr.differentials != cr2.differentials
assert cr2.differentials['s'] is diff
# make sure it works even if a differential is present already
diff2 = CartesianDifferential([.1, .2, .3]*u.m/u.s)
cr3 = CartesianRepresentation([1, 2, 3]*u.kpc, differentials=diff)
cr4 = cr3.with_differentials(diff2)
assert cr4.differentials['s'] != cr3.differentials['s']
assert cr4.differentials['s'] == diff2
# also ensure a *scalar* differential will work
cr5 = cr.with_differentials(diff)
assert len(cr5.differentials) == 1
assert cr5.differentials['s'] == diff
# make sure we don't update the original representation's dict
d1 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km/u.s)
d2 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km/u.s**2)
r1 = CartesianRepresentation(*np.random.random((3, 5)), unit=u.pc,
differentials=d1)
r2 = r1.with_differentials(d2)
assert r1.differentials['s'] is r2.differentials['s']
assert 's2' not in r1.differentials
assert 's2' in r2.differentials
def test_repr_with_differentials():
diff = CartesianDifferential([.1, .2, .3]*u.km/u.s)
cr = CartesianRepresentation([1, 2, 3]*u.kpc, differentials=diff)
assert "has differentials w.r.t.: 's'" in repr(cr)
def test_to_cartesian():
"""
Test that to_cartesian drops the differential.
"""
sd = SphericalDifferential(d_lat=1*u.deg, d_lon=2*u.deg, d_distance=10*u.m)
sr = SphericalRepresentation(lat=1*u.deg, lon=2*u.deg, distance=10*u.m,
differentials=sd)
cart = sr.to_cartesian()
assert cart.get_name() == 'cartesian'
assert not cart.differentials
def test_recommended_units_deprecation():
sr = SphericalRepresentation(lat=1*u.deg, lon=2*u.deg, distance=10*u.m)
with catch_warnings(AstropyDeprecationWarning) as w:
sr.recommended_units
assert 'recommended_units' in str(w[0].message)
with catch_warnings(AstropyDeprecationWarning) as w:
class MyClass(SphericalRepresentation):
attr_classes = SphericalRepresentation.attr_classes
recommended_units = {}
assert 'recommended_units' in str(w[0].message)
@pytest.fixture
def unitphysics():
"""
This fixture is used in ``test_unitphysics`` below.
"""
had_unit = False
if hasattr(PhysicsSphericalRepresentation, '_unit_representation'):
orig = PhysicsSphericalRepresentation._unit_representation
had_unit = True
class UnitPhysicsSphericalRepresentation(BaseRepresentation):
attr_classes = OrderedDict([('phi', Angle),
('theta', Angle)])
def __init__(self, phi, theta, differentials=None, copy=True):
super().__init__(phi, theta, copy=copy, differentials=differentials)
# Wrap/validate phi/theta
if copy:
self._phi = self._phi.wrap_at(360 * u.deg)
else:
# wrap in place, since the `wrap_at` call above necessarily returns a copy
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.*u.deg) or np.any(self._theta > 180.*u.deg):
raise ValueError('Inclination angle(s) must be within '
'0 deg <= angle <= 180 deg, '
'got {0}'.format(theta.to(u.degree)))
@property
def phi(self):
return self._phi
@property
def theta(self):
return self._theta
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return OrderedDict(
(('phi', CartesianRepresentation(-sinphi, cosphi, 0., copy=False)),
('theta', CartesianRepresentation(costheta*cosphi,
costheta*sinphi,
-sintheta, copy=False))))
def scale_factors(self):
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('phi', sintheta),
('theta', l)))
def to_cartesian(self):
x = np.sin(self.theta) * np.cos(self.phi)
y = np.sin(self.theta) * np.sin(self.phi)
z = np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, copy=False)
def norm(self):
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled,
copy=False)
PhysicsSphericalRepresentation._unit_representation = UnitPhysicsSphericalRepresentation
yield UnitPhysicsSphericalRepresentation
if had_unit:
PhysicsSphericalRepresentation._unit_representation = orig
else:
del PhysicsSphericalRepresentation._unit_representation
# remove from the module-level representations, if present
REPRESENTATION_CLASSES.pop(UnitPhysicsSphericalRepresentation.get_name(), None)
def test_unitphysics(unitphysics):
obj = unitphysics(phi=0*u.deg, theta=10*u.deg)
objkw = unitphysics(phi=0*u.deg, theta=10*u.deg)
assert objkw.phi == obj.phi
assert objkw.theta == obj.theta
asphys = obj.represent_as(PhysicsSphericalRepresentation)
assert asphys.phi == obj.phi
assert asphys.theta == obj.theta
assert_allclose_quantity(asphys.r, 1*u.dimensionless_unscaled)
assph = obj.represent_as(SphericalRepresentation)
assert assph.lon == obj.phi
assert assph.lat == 80*u.deg
assert_allclose_quantity(assph.distance, 1*u.dimensionless_unscaled)
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initialization of angles not already covered by the API tests"""
import pickle
import pytest
import numpy as np
from ..earth import EarthLocation, ELLIPSOIDS
from ..angles import Longitude, Latitude
from ...tests.helper import quantity_allclose
from ... import units as u
from ...time import Time
from ... import constants
from ..name_resolve import NameResolveError
def allclose_m14(a, b, rtol=1.e-14, atol=None):
if atol is None:
atol = 1.e-14 * getattr(a, 'unit', 1)
return quantity_allclose(a, b, rtol, atol)
def allclose_m8(a, b, rtol=1.e-8, atol=None):
if atol is None:
atol = 1.e-8 * getattr(a, 'unit', 1)
return quantity_allclose(a, b, rtol, atol)
def isclose_m14(val, ref):
return np.array([allclose_m14(v, r) for (v, r) in zip(val, ref)])
def isclose_m8(val, ref):
return np.array([allclose_m8(v, r) for (v, r) in zip(val, ref)])
def vvd(val, valok, dval, func, test, status):
"""Mimic routine of erfa/src/t_erfa_c.c (to help copy & paste)"""
assert quantity_allclose(val, valok * val.unit, atol=dval * val.unit)
def test_gc2gd():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gc2gd"""
x, y, z = (2e6, 3e6, 5.244e6)
status = 0 # help for copy & paste of vvd
location = EarthLocation.from_geocentric(x, y, z, u.m)
e, p, h = location.to_geodetic('WGS84')
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
e, p, h = location.to_geodetic('GRS80')
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
e, p, h = location.to_geodetic('WGS72')
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e3", status)
vvd(p, 0.97160181811015119, 1e-14, "eraGc2gd", "p3", status)
vvd(h, 333.27707261303181, 1e-8, "eraGc2gd", "h3", status)
def test_gd2gc():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gd2gc"""
e = 3.1 * u.rad
p = -0.5 * u.rad
h = 2500.0 * u.m
status = 0 # help for copy & paste of vvd
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='WGS84')
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5599000.5577049947, 1e-7, "eraGd2gc", "0/1", status)
vvd(xyz[1], 233011.67223479203, 1e-7, "eraGd2gc", "1/1", status)
vvd(xyz[2], -3040909.4706983363, 1e-7, "eraGd2gc", "2/1", status)
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='GRS80')
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5599000.5577260984, 1e-7, "eraGd2gc", "0/2", status)
vvd(xyz[1], 233011.6722356703, 1e-7, "eraGd2gc", "1/2", status)
vvd(xyz[2], -3040909.4706095476, 1e-7, "eraGd2gc", "2/2", status)
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='WGS72')
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5598998.7626301490, 1e-7, "eraGd2gc", "0/3", status)
vvd(xyz[1], 233011.5975297822, 1e-7, "eraGd2gc", "1/3", status)
vvd(xyz[2], -3040908.6861467111, 1e-7, "eraGd2gc", "2/3", status)
class TestInput():
def setup(self):
self.lon = Longitude([0., 45., 90., 135., 180., -180, -90, -45], u.deg,
wrap_angle=180*u.deg)
self.lat = Latitude([+0., 30., 60., +90., -90., -60., -30., 0.], u.deg)
self.h = u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11., -.1], u.m)
self.location = EarthLocation.from_geodetic(self.lon, self.lat, self.h)
self.x, self.y, self.z = self.location.to_geocentric()
def test_default_ellipsoid(self):
assert self.location.ellipsoid == EarthLocation._ellipsoid
def test_geo_attributes(self):
assert all(np.all(_1 == _2)
for _1, _2 in zip(self.location.geodetic,
self.location.to_geodetic()))
assert all(np.all(_1 == _2)
for _1, _2 in zip(self.location.geocentric,
self.location.to_geocentric()))
def test_attribute_classes(self):
"""Test that attribute classes are correct (and not EarthLocation)"""
assert type(self.location.x) is u.Quantity
assert type(self.location.y) is u.Quantity
assert type(self.location.z) is u.Quantity
assert type(self.location.lon) is Longitude
assert type(self.location.lat) is Latitude
assert type(self.location.height) is u.Quantity
def test_input(self):
"""Check input is parsed correctly"""
# units of length should be assumed geocentric
geocentric = EarthLocation(self.x, self.y, self.z)
assert np.all(geocentric == self.location)
geocentric2 = EarthLocation(self.x.value, self.y.value, self.z.value,
self.x.unit)
assert np.all(geocentric2 == self.location)
geodetic = EarthLocation(self.lon, self.lat, self.h)
assert np.all(geodetic == self.location)
geodetic2 = EarthLocation(self.lon.to_value(u.degree),
self.lat.to_value(u.degree),
self.h.to_value(u.m))
assert np.all(geodetic2 == self.location)
geodetic3 = EarthLocation(self.lon, self.lat)
assert allclose_m14(geodetic3.lon.value,
self.location.lon.value)
assert allclose_m14(geodetic3.lat.value,
self.location.lat.value)
assert not np.any(isclose_m14(geodetic3.height.value,
self.location.height.value))
geodetic4 = EarthLocation(self.lon, self.lat, self.h[-1])
assert allclose_m14(geodetic4.lon.value,
self.location.lon.value)
assert allclose_m14(geodetic4.lat.value,
self.location.lat.value)
assert allclose_m14(geodetic4.height[-1].value,
self.location.height[-1].value)
assert not np.any(isclose_m14(geodetic4.height[:-1].value,
self.location.height[:-1].value))
# check length unit preservation
geocentric5 = EarthLocation(self.x, self.y, self.z, u.pc)
assert geocentric5.unit is u.pc
assert geocentric5.x.unit is u.pc
assert geocentric5.height.unit is u.pc
assert allclose_m14(geocentric5.x.to_value(self.x.unit), self.x.value)
geodetic5 = EarthLocation(self.lon, self.lat, self.h.to(u.pc))
assert geodetic5.unit is u.pc
assert geodetic5.x.unit is u.pc
assert geodetic5.height.unit is u.pc
assert allclose_m14(geodetic5.x.to_value(self.x.unit), self.x.value)
def test_invalid_input(self):
"""Check invalid input raises exception"""
# input incomprehensible as either geocentric or geodetic raises TypeError
with pytest.raises(TypeError):
EarthLocation(self.lon, self.y, self.z)
# wrong units
with pytest.raises(u.UnitsError):
EarthLocation.from_geocentric(self.lon, self.lat, self.lat)
# inconsistent units
with pytest.raises(u.UnitsError):
EarthLocation.from_geocentric(self.h, self.lon, self.lat)
# floats without a unit
with pytest.raises(TypeError):
EarthLocation.from_geocentric(self.x.value, self.y.value,
self.z.value)
# inconsistent shape
with pytest.raises(ValueError):
EarthLocation.from_geocentric(self.x, self.y, self.z[:5])
# inconsistent units
with pytest.raises(u.UnitsError):
EarthLocation.from_geodetic(self.x, self.y, self.z)
# inconsistent shape
with pytest.raises(ValueError):
EarthLocation.from_geodetic(self.lon, self.lat, self.h[:5])
def test_slicing(self):
# test on WGS72 location, so we can check the ellipsoid is passed on
locwgs72 = EarthLocation.from_geodetic(self.lon, self.lat, self.h,
ellipsoid='WGS72')
loc_slice1 = locwgs72[4]
assert isinstance(loc_slice1, EarthLocation)
assert loc_slice1.unit is locwgs72.unit
assert loc_slice1.ellipsoid == locwgs72.ellipsoid == 'WGS72'
assert not loc_slice1.shape
with pytest.raises(TypeError):
loc_slice1[0]
with pytest.raises(IndexError):
len(loc_slice1)
loc_slice2 = locwgs72[4:6]
assert isinstance(loc_slice2, EarthLocation)
assert len(loc_slice2) == 2
assert loc_slice2.unit is locwgs72.unit
assert loc_slice2.ellipsoid == locwgs72.ellipsoid
assert loc_slice2.shape == (2,)
loc_x = locwgs72['x']
assert type(loc_x) is u.Quantity
assert loc_x.shape == locwgs72.shape
assert loc_x.unit is locwgs72.unit
def test_invalid_ellipsoid(self):
# unknown ellipsoid
with pytest.raises(ValueError):
EarthLocation.from_geodetic(self.lon, self.lat, self.h,
ellipsoid='foo')
with pytest.raises(TypeError):
EarthLocation(self.lon, self.lat, self.h, ellipsoid='foo')
with pytest.raises(ValueError):
self.location.ellipsoid = 'foo'
with pytest.raises(ValueError):
self.location.to_geodetic('foo')
@pytest.mark.parametrize('ellipsoid', ELLIPSOIDS)
def test_ellipsoid(self, ellipsoid):
"""Test that different ellipsoids are understood, and differ"""
# check that heights differ for different ellipsoids
# need different tolerance, since heights are relative to ~6000 km
lon, lat, h = self.location.to_geodetic(ellipsoid)
if ellipsoid == self.location.ellipsoid:
assert allclose_m8(h.value, self.h.value)
else:
# Some heights are very similar between ellipsoids; some lon, lat identical.
assert not np.all(isclose_m8(h.value, self.h.value))
# given lon, lat, height, check that x,y,z differ
location = EarthLocation.from_geodetic(self.lon, self.lat, self.h,
ellipsoid=ellipsoid)
if ellipsoid == self.location.ellipsoid:
assert allclose_m14(location.z.value, self.z.value)
else:
assert not np.all(isclose_m14(location.z.value, self.z.value))
def test_to_value(self):
loc = self.location
loc_ndarray = loc.view(np.ndarray)
assert np.all(loc.value == loc_ndarray)
loc2 = self.location.to(u.km)
loc2_ndarray = np.empty_like(loc_ndarray)
for coo in 'x', 'y', 'z':
loc2_ndarray[coo] = loc_ndarray[coo] / 1000.
assert np.all(loc2.value == loc2_ndarray)
loc2_value = self.location.to_value(u.km)
assert np.all(loc2_value == loc2_ndarray)
def test_pickling():
"""Regression test against #4304."""
el = EarthLocation(0.*u.m, 6000*u.km, 6000*u.km)
s = pickle.dumps(el)
el2 = pickle.loads(s)
assert el == el2
def test_repr_latex():
"""
Regression test for issue #4542
"""
somelocation = EarthLocation(lon='149:3:57.9', lat='-31:16:37.3')
somelocation._repr_latex_()
somelocation2 = EarthLocation(lon=[1., 2.]*u.deg, lat=[-1., 9.]*u.deg)
somelocation2._repr_latex_()
@pytest.mark.remote_data
def test_of_address():
# just a location
try:
loc = EarthLocation.of_address("New York, NY")
except NameResolveError as e:
# The Google Maps API rate limit might surface even here in Travis CI.
pytest.xfail(str(e))
else:
assert quantity_allclose(loc.lat, 40.7128*u.degree)
assert quantity_allclose(loc.lon, -74.0059*u.degree)
assert np.allclose(loc.height.value, 0.)
# Put this one here as a buffer to get around the Google Maps API per-second rate limit.
# no match: This always raises NameResolveError
with pytest.raises(NameResolveError):
EarthLocation.of_address("lkjasdflkja")
# a location and height
try:
loc = EarthLocation.of_address("New York, NY", get_height=True)
except NameResolveError as e:
# The buffer above is sometimes insufficient to get around the API limit,
# but we also do not want to drag things out with time.sleep(0.195),
# where 0.195 was empirically determined on some physical machine.
pytest.xfail(str(e))
else:
assert quantity_allclose(loc.lat, 40.7128*u.degree)
assert quantity_allclose(loc.lon, -74.0059*u.degree)
assert quantity_allclose(loc.height, 10.438659669*u.meter, atol=1.*u.cm)
def test_geodetic_tuple():
lat = 2*u.deg
lon = 10*u.deg
height = 100*u.m
el = EarthLocation.from_geodetic(lat=lat, lon=lon, height=height)
res1 = el.to_geodetic()
res2 = el.geodetic
assert res1.lat == res2.lat and quantity_allclose(res1.lat, lat)
assert res1.lon == res2.lon and quantity_allclose(res1.lon, lon)
assert res1.height == res2.height and quantity_allclose(res1.height, height)
def test_gravitational_redshift():
someloc = EarthLocation(lon=-87.7*u.deg, lat=37*u.deg)
sometime = Time('2017-8-21 18:26:40')
zg0 = someloc.gravitational_redshift(sometime)
# should be of order ~few mm/s change per week
zg_week = someloc.gravitational_redshift(sometime + 7 * u.day)
assert 1.*u.mm/u.s < abs(zg_week - zg0) < 1*u.cm/u.s
# ~cm/s over a half-year
zg_halfyear = someloc.gravitational_redshift(sometime + 0.5 * u.yr)
assert 1*u.cm/u.s < abs(zg_halfyear - zg0) < 1*u.dm/u.s
# but when back to the same time in a year, should be tenths of mm
# even over decades
zg_year = someloc.gravitational_redshift(sometime - 20 * u.year)
assert .1*u.mm/u.s < abs(zg_year - zg0) < 1*u.mm/u.s
# Check mass adjustments.
# If Jupiter and the moon are ignored, effect should be off by ~ .5 mm/s
masses = {'sun': constants.G*constants.M_sun,
'jupiter': 0*constants.G*u.kg,
'moon': 0*constants.G*u.kg}
zg_moonjup = someloc.gravitational_redshift(sometime, masses=masses)
assert .1*u.mm/u.s < abs(zg_moonjup - zg0) < 1*u.mm/u.s
# Check that simply not including the bodies gives the same result.
assert zg_moonjup == someloc.gravitational_redshift(sometime,
bodies=('sun',))
# And that earth can be given, even not as last argument
assert zg_moonjup == someloc.gravitational_redshift(
sometime, bodies=('earth', 'sun',))
# If the earth is also ignored, effect should be off by ~ 20 cm/s
# This also tests the conversion of kg to gravitational units.
masses['earth'] = 0*u.kg
zg_moonjupearth = someloc.gravitational_redshift(sometime, masses=masses)
assert 1*u.dm/u.s < abs(zg_moonjupearth - zg0) < 1*u.m/u.s
# If all masses are zero, redshift should be 0 as well.
masses['sun'] = 0*u.kg
assert someloc.gravitational_redshift(sometime, masses=masses) == 0
with pytest.raises(KeyError):
someloc.gravitational_redshift(sometime, bodies=('saturn',))
with pytest.raises(u.UnitsError):
masses = {'sun': constants.G*constants.M_sun,
'jupiter': constants.G*constants.M_jup,
'moon': 1*u.km, # wrong units!
'earth': constants.G*constants.M_earth}
someloc.gravitational_redshift(sometime, masses=masses)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from copy import copy
from io import StringIO
import pytest
import numpy as np
from ..registry import _readers, _writers, _identifiers
from .. import registry as io_registry
from ...table import Table
from ... import units as u
_READERS_ORIGINAL = copy(_readers)
_WRITERS_ORIGINAL = copy(_writers)
_IDENTIFIERS_ORIGINAL = copy(_identifiers)
try:
import yaml # pylint: disable=W0611
HAS_YAML = True
except ImportError:
HAS_YAML = False
class TestData:
read = classmethod(io_registry.read)
write = io_registry.write
def setup_function(function):
_readers.clear()
_writers.clear()
_identifiers.clear()
def empty_reader(*args, **kwargs):
return TestData()
def empty_writer(table, *args, **kwargs):
pass
def empty_identifier(*args, **kwargs):
return True
def test_get_reader_invalid():
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.get_reader('test', TestData)
assert str(exc.value).startswith(
"No reader defined for format 'test' and class 'TestData'")
def test_get_writer_invalid():
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.get_writer('test', TestData)
assert str(exc.value).startswith(
"No writer defined for format 'test' and class 'TestData'")
def test_register_reader():
io_registry.register_reader('test1', TestData, empty_reader)
io_registry.register_reader('test2', TestData, empty_reader)
assert io_registry.get_reader('test1', TestData) == empty_reader
assert io_registry.get_reader('test2', TestData) == empty_reader
io_registry.unregister_reader('test1', TestData)
with pytest.raises(io_registry.IORegistryError):
io_registry.get_reader('test1', TestData)
assert io_registry.get_reader('test2', TestData) == empty_reader
io_registry.unregister_reader('test2', TestData)
with pytest.raises(io_registry.IORegistryError):
io_registry.get_reader('test2', TestData)
def test_register_writer():
io_registry.register_writer('test1', TestData, empty_writer)
io_registry.register_writer('test2', TestData, empty_writer)
assert io_registry.get_writer('test1', TestData) == empty_writer
assert io_registry.get_writer('test2', TestData) == empty_writer
io_registry.unregister_writer('test1', TestData)
with pytest.raises(io_registry.IORegistryError):
io_registry.get_writer('test1', TestData)
assert io_registry.get_writer('test2', TestData) == empty_writer
io_registry.unregister_writer('test2', TestData)
with pytest.raises(io_registry.IORegistryError):
io_registry.get_writer('test2', TestData)
def test_register_identifier():
io_registry.register_identifier('test1', TestData, empty_identifier)
io_registry.register_identifier('test2', TestData, empty_identifier)
io_registry.unregister_identifier('test1', TestData)
io_registry.unregister_identifier('test2', TestData)
def test_register_reader_invalid():
io_registry.register_reader('test', TestData, empty_reader)
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.register_reader('test', TestData, empty_reader)
assert (str(exc.value) == "Reader for format 'test' and class 'TestData' "
"is already defined")
def test_register_writer_invalid():
io_registry.register_writer('test', TestData, empty_writer)
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.register_writer('test', TestData, empty_writer)
assert (str(exc.value) == "Writer for format 'test' and class 'TestData' "
"is already defined")
def test_register_identifier_invalid():
io_registry.register_identifier('test', TestData, empty_identifier)
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.register_identifier('test', TestData, empty_identifier)
assert (str(exc.value) == "Identifier for format 'test' and class "
"'TestData' is already defined")
def test_unregister_reader_invalid():
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.unregister_reader('test', TestData)
assert str(exc.value) == "No reader defined for format 'test' and class 'TestData'"
def test_unregister_writer_invalid():
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.unregister_writer('test', TestData)
assert str(exc.value) == "No writer defined for format 'test' and class 'TestData'"
def test_unregister_identifier_invalid():
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.unregister_identifier('test', TestData)
assert str(exc.value) == "No identifier defined for format 'test' and class 'TestData'"
def test_register_reader_force():
io_registry.register_reader('test', TestData, empty_reader)
io_registry.register_reader('test', TestData, empty_reader, force=True)
def test_register_writer_force():
io_registry.register_writer('test', TestData, empty_writer)
io_registry.register_writer('test', TestData, empty_writer, force=True)
def test_register_identifier_force():
io_registry.register_identifier('test', TestData, empty_identifier)
io_registry.register_identifier('test', TestData, empty_identifier, force=True)
def test_read_noformat():
with pytest.raises(io_registry.IORegistryError) as exc:
TestData.read()
assert str(exc.value).startswith("Format could not be identified.")
def test_write_noformat():
with pytest.raises(io_registry.IORegistryError) as exc:
TestData().write()
assert str(exc.value).startswith("Format could not be identified.")
def test_read_noformat_arbitrary():
"""Test that all identifier functions can accept arbitrary input"""
_identifiers.update(_IDENTIFIERS_ORIGINAL)
with pytest.raises(io_registry.IORegistryError) as exc:
TestData.read(object())
assert str(exc.value).startswith("Format could not be identified.")
def test_read_noformat_arbitrary_file(tmpdir):
"""Tests that all identifier functions can accept arbitrary files"""
_readers.update(_READERS_ORIGINAL)
testfile = str(tmpdir.join('foo.example'))
with open(testfile, 'w') as f:
f.write("Hello world")
with pytest.raises(io_registry.IORegistryError) as exc:
Table.read(testfile)
assert str(exc.value).startswith("Format could not be identified.")
def test_write_noformat_arbitrary():
"""Test that all identifier functions can accept arbitrary input"""
_identifiers.update(_IDENTIFIERS_ORIGINAL)
with pytest.raises(io_registry.IORegistryError) as exc:
TestData().write(object())
assert str(exc.value).startswith("Format could not be identified.")
def test_write_noformat_arbitrary_file(tmpdir):
"""Tests that all identifier functions can accept arbitrary files"""
_writers.update(_WRITERS_ORIGINAL)
testfile = str(tmpdir.join('foo.example'))
with pytest.raises(io_registry.IORegistryError) as exc:
Table().write(testfile)
assert str(exc.value).startswith("Format could not be identified.")
def test_read_toomanyformats():
io_registry.register_identifier('test1', TestData, lambda o, *x, **y: True)
io_registry.register_identifier('test2', TestData, lambda o, *x, **y: True)
with pytest.raises(io_registry.IORegistryError) as exc:
TestData.read()
assert str(exc.value) == "Format is ambiguous - options are: test1, test2"
def test_write_toomanyformats():
io_registry.register_identifier('test1', TestData, lambda o, *x, **y: True)
io_registry.register_identifier('test2', TestData, lambda o, *x, **y: True)
with pytest.raises(io_registry.IORegistryError) as exc:
TestData().write()
assert str(exc.value) == "Format is ambiguous - options are: test1, test2"
def test_read_format_noreader():
with pytest.raises(io_registry.IORegistryError) as exc:
TestData.read(format='test')
assert str(exc.value).startswith(
"No reader defined for format 'test' and class 'TestData'")
def test_write_format_nowriter():
with pytest.raises(io_registry.IORegistryError) as exc:
TestData().write(format='test')
assert str(exc.value).startswith(
"No writer defined for format 'test' and class 'TestData'")
def test_read_identifier(tmpdir):
io_registry.register_identifier(
'test1', TestData,
lambda o, path, fileobj, *x, **y: path.endswith('a'))
io_registry.register_identifier(
'test2', TestData,
lambda o, path, fileobj, *x, **y: path.endswith('b'))
# Now check that we got past the identifier and are trying to get
# the reader. The io_registry.get_reader will fail but the error message
# will tell us if the identifier worked.
filename = tmpdir.join("testfile.a").strpath
open(filename, 'w').close()
with pytest.raises(io_registry.IORegistryError) as exc:
TestData.read(filename)
assert str(exc.value).startswith(
"No reader defined for format 'test1' and class 'TestData'")
filename = tmpdir.join("testfile.b").strpath
open(filename, 'w').close()
with pytest.raises(io_registry.IORegistryError) as exc:
TestData.read(filename)
assert str(exc.value).startswith(
"No reader defined for format 'test2' and class 'TestData'")
def test_write_identifier():
io_registry.register_identifier('test1', TestData, lambda o, *x, **y: x[0].startswith('a'))
io_registry.register_identifier('test2', TestData, lambda o, *x, **y: x[0].startswith('b'))
# Now check that we got past the identifier and are trying to get
# the writer. The io_registry.get_writer will fail but the error message
# will tell us if the identifier worked.
with pytest.raises(io_registry.IORegistryError) as exc:
TestData().write('abc')
assert str(exc.value).startswith(
"No writer defined for format 'test1' and class 'TestData'")
with pytest.raises(io_registry.IORegistryError) as exc:
TestData().write('bac')
assert str(exc.value).startswith(
"No writer defined for format 'test2' and class 'TestData'")
def test_identifier_origin():
io_registry.register_identifier('test1', TestData, lambda o, *x, **y: o == 'read')
io_registry.register_identifier('test2', TestData, lambda o, *x, **y: o == 'write')
io_registry.register_reader('test1', TestData, empty_reader)
io_registry.register_writer('test2', TestData, empty_writer)
# Neither call should raise an ambiguity error, since the identifiers key off the origin
TestData.read()
TestData().write()
with pytest.raises(io_registry.IORegistryError) as exc:
TestData.read(format='test2')
assert str(exc.value).startswith(
"No reader defined for format 'test2' and class 'TestData'")
with pytest.raises(io_registry.IORegistryError) as exc:
TestData().write(format='test1')
assert str(exc.value).startswith(
"No writer defined for format 'test1' and class 'TestData'")
def test_read_valid_return():
io_registry.register_reader('test', TestData, lambda: TestData())
t = TestData.read(format='test')
assert isinstance(t, TestData)
def test_non_existing_unknown_ext():
"""Raise the correct error when attempting to read a non-existing
file with an unknown extension."""
with pytest.raises(OSError):
data = Table.read('non-existing-file-with-unknown.ext')
def test_read_basic_table():
data = np.array(list(zip([1, 2, 3], ['a', 'b', 'c'])),
dtype=[('A', int), ('B', '|U1')])
io_registry.register_reader('test', Table, lambda x: Table(x))
t = Table.read(data, format='test')
assert t.keys() == ['A', 'B']
for i in range(3):
assert t['A'][i] == data['A'][i]
assert t['B'][i] == data['B'][i]
def test_register_readers_with_same_name_on_different_classes():
# No errors should be generated if the same name is registered for
# different objects...but this failed under python3
io_registry.register_reader('test', TestData, lambda: TestData())
io_registry.register_reader('test', Table, lambda: Table())
t = TestData.read(format='test')
assert isinstance(t, TestData)
tbl = Table.read(format='test')
assert isinstance(tbl, Table)
def test_inherited_registration():
# check that multi-generation inheritance works properly,
# meaning that a child inherits from parents before
# grandparents, see astropy/astropy#7156
class Child1(Table):
pass
class Child2(Child1):
pass
def _read():
return Table()
def _read1():
return Child1()
# check that reader gets inherited
io_registry.register_reader('test', Table, _read)
assert io_registry.get_reader('test', Child2) is _read
# check that nearest ancestor is identified
# (i.e. that the reader for Child2 is the registered method
# for Child1, and not Table)
io_registry.register_reader('test', Child1, _read1)
assert io_registry.get_reader('test', Child2) is _read1
def teardown_function(function):
_readers.update(_READERS_ORIGINAL)
_writers.update(_WRITERS_ORIGINAL)
_identifiers.update(_IDENTIFIERS_ORIGINAL)
class TestSubclass:
"""
Test using registry with a Table sub-class
"""
def test_read_table_subclass(self):
class MyTable(Table):
pass
data = ['a b', '1 2']
mt = MyTable.read(data, format='ascii')
t = Table.read(data, format='ascii')
assert np.all(mt == t)
assert mt.colnames == t.colnames
assert type(mt) is MyTable
def test_write_table_subclass(self):
buffer = StringIO()
class MyTable(Table):
pass
mt = MyTable([[1], [2]], names=['a', 'b'])
mt.write(buffer, format='ascii')
assert buffer.getvalue() == os.linesep.join(['a b', '1 2', ''])
def test_read_table_subclass_with_columns_attributes(self, tmpdir):
"""Regression test for https://github.com/astropy/astropy/issues/7181
"""
class MTable(Table):
pass
mt = MTable([[1, 2.5]], names=['a'])
mt['a'].unit = u.m
mt['a'].format = '.4f'
mt['a'].description = 'hello'
testfile = str(tmpdir.join('junk.fits'))
mt.write(testfile, overwrite=True)
t = MTable.read(testfile)
assert np.all(mt == t)
assert mt.colnames == t.colnames
assert type(t) is MTable
assert t['a'].unit == u.m
if HAS_YAML:
assert t['a'].format == '.4f'
assert t['a'].description == 'hello'
else:
assert t['a'].format is None
assert t['a'].description is None
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TODO: Test FITS parsing
# STDLIB
import io
import re
import sys
import gzip
import base64
import codecs
import urllib.request
import warnings
# THIRD-PARTY
import numpy as np
from numpy import ma
# LOCAL
from .. import fits
from ... import __version__ as astropy_version
from ...utils.collections import HomogeneousList
from ...utils.xml.writer import XMLWriter
from ...utils.exceptions import AstropyDeprecationWarning
from ...utils.misc import InheritDocstrings
from . import converters
from .exceptions import (warn_or_raise, vo_warn, vo_raise, vo_reraise,
warn_unknown_attrs, W06, W07, W08, W09, W10, W11, W12,
W13, W15, W17, W18, W19, W20, W21, W22, W26, W27, W28,
W29, W32, W33, W35, W36, W37, W38, W40, W41, W42, W43,
W44, W45, W50, W52, W53, E06, E08, E09, E10, E11, E12,
E13, E15, E16, E17, E18, E19, E20, E21)
from . import ucd as ucd_mod
from . import util
from . import xmlutil
try:
from . import tablewriter
_has_c_tabledata_writer = True
except ImportError:
_has_c_tabledata_writer = False
__all__ = [
'Link', 'Info', 'Values', 'Field', 'Param', 'CooSys',
'FieldRef', 'ParamRef', 'Group', 'Table', 'Resource',
'VOTableFile'
]
# The default number of rows to read in each chunk before converting
# to an array.
DEFAULT_CHUNK_SIZE = 256
RESIZE_AMOUNT = 1.5
######################################################################
# FACTORY FUNCTIONS
def _resize(masked, new_size):
"""
Masked arrays cannot be resized in place, and `np.resize` and
`ma.resize` are both incompatible with structured arrays.
Therefore, we grow the array by hand here.
"""
new_array = ma.zeros((new_size,), dtype=masked.dtype)
length = min(len(masked), new_size)
new_array[:length] = masked[:length]
return new_array
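# A minimal usage sketch of `_resize` (illustrative only; the array and
# field names are made up): grow a masked structured array from 2 to 5
# rows while preserving the existing rows.
#
#   >>> arr = ma.zeros((2,), dtype=[('x', float)])
#   >>> arr['x'] = [1., 2.]
#   >>> bigger = _resize(arr, 5)
#   >>> len(bigger), list(bigger['x'][:2])
#   (5, [1.0, 2.0])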
def _lookup_by_attr_factory(attr, unique, iterator, element_name, doc):
"""
Creates a function useful for looking up an element by a given
attribute.
Parameters
----------
attr : str
The attribute name
unique : bool
Should be `True` if the attribute is unique and therefore this
should return only one value. Otherwise, a generator yielding
all matching values is returned.
iterator : generator
A generator that iterates over some arbitrary set of elements
element_name : str
The XML element name of the elements being iterated over (used
for error messages only).
doc : str
A docstring to apply to the generated function.
Returns
-------
factory : function
A function that looks up an element by the given attribute.
"""
def lookup_by_attr(self, ref, before=None):
"""
Given a string *ref*, finds the first element in the iterator
where the given attribute == *ref*. If *before* is provided,
will stop searching at the object *before*. This is
important, since "forward references" are not allowed in the
VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if getattr(element, attr, None) == ref:
vo_raise(
"{} references itself".format(element_name),
element._config, element._pos, KeyError)
break
if getattr(element, attr, None) == ref:
yield element
def lookup_by_attr_unique(self, ref, before=None):
for element in lookup_by_attr(self, ref, before=before):
return element
raise KeyError(
"No {} with {} '{}' found before the referencing {}".format(
element_name, attr, ref, element_name))
if unique:
lookup_by_attr_unique.__doc__ = doc
return lookup_by_attr_unique
else:
lookup_by_attr.__doc__ = doc
return lookup_by_attr
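# Hedged usage sketch for `_lookup_by_attr_factory`: the class and method
# names below are hypothetical, chosen only to show how the `attr`,
# `unique`, `iterator`, `element_name` and `doc` arguments fit together.
#
#   class Container:
#       def iter_fields(self):
#           yield from self._fields
#       get_field_by_id = _lookup_by_attr_factory(
#           'ID', True, 'iter_fields', 'FIELD',
#           """Looks up a FIELD element by the given ID.""")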
def _lookup_by_id_or_name_factory(iterator, element_name, doc):
"""
Like `_lookup_by_attr_factory`, but looks in both the "ID" and
"name" attributes.
"""
def lookup_by_id_or_name(self, ref, before=None):
"""
Given a key *ref*, finds the first element in the iterator
with the attribute ID == *ref* or name == *ref*. If *before*
is provided, will stop searching at the object *before*. This
is important, since "forward references" are not allowed in
the VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if ref in (element.ID, element.name):
vo_raise(
"{} references itself".format(element_name),
element._config, element._pos, KeyError)
break
if ref in (element.ID, element.name):
return element
raise KeyError(
"No {} with ID or name '{}' found before the referencing {}".format(
element_name, ref, element_name))
lookup_by_id_or_name.__doc__ = doc
return lookup_by_id_or_name
def _get_default_unit_format(config):
"""
Get the default unit format as specified in the VOTable spec.
"""
# In the future, this should take into account the VOTable
# version.
return 'cds'
def _get_unit_format(config):
"""
Get the unit format based on the configuration.
"""
if config.get('unit_format') is None:
format = _get_default_unit_format(config)
else:
format = config['unit_format']
return format
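# Behaviour sketch for the two helpers above (derived directly from the
# code, no extra assumptions): an explicit 'unit_format' entry in the
# config wins; otherwise the VOTable default ('cds') is used.
#
#   >>> _get_unit_format({})
#   'cds'
#   >>> _get_unit_format({'unit_format': 'fits'})
#   'fits'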
######################################################################
# ATTRIBUTE CHECKERS
def check_astroyear(year, field, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*year* is not a valid astronomical year as defined by the VOTABLE
standard.
Parameters
----------
year : str
An astronomical year string
field : str
The name of the field this year was found in (used for error
message)
config, pos : optional
Information about the source of the value
"""
if (year is not None and
re.match(r"^[JB]?[0-9]+([.][0-9]*)?$", year) is None):
warn_or_raise(W07, W07, (field, year), config, pos)
return False
return True
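# Illustrative examples of the year syntax accepted above: a bare number,
# optionally prefixed with 'J' (Julian) or 'B' (Besselian), optionally
# fractional. With the default non-pedantic config, an invalid year
# emits W07 and the function returns False.
#
#   >>> check_astroyear('J2000', 'equinox')
#   True
#   >>> check_astroyear('B1950.5', 'equinox')
#   True
#   >>> check_astroyear('MJD51544', 'equinox')  # warns W07
#   False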
def check_string(string, attr_name, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*string* is not a string.
Parameters
----------
string : str
The string to check
attr_name : str
The name of the attribute the string was found in (used for error
messages)
config, pos : optional
Information about the source of the value
"""
if string is not None and not isinstance(string, str):
warn_or_raise(W08, W08, attr_name, config, pos)
return False
return True
def resolve_id(ID, id, config=None, pos=None):
if ID is None and id is not None:
warn_or_raise(W09, W09, (), config, pos)
return id
return ID
def check_ucd(ucd, config=None, pos=None):
"""
Warns or raises a
`~astropy.io.votable.exceptions.VOTableSpecError` if *ucd* is not
a valid `unified content descriptor`_ string as defined by the
VOTABLE standard.
Parameters
----------
ucd : str
A UCD string.
config, pos : optional
Information about the source of the value
"""
if config is None:
config = {}
if config.get('version_1_1_or_later'):
try:
ucd_mod.parse_ucd(
ucd,
check_controlled_vocabulary=config.get(
'version_1_2_or_later', False),
has_colon=config.get('version_1_2_or_later', False))
except ValueError as e:
# This weird construction is for Python 3 compatibility
if config.get('pedantic'):
vo_raise(W06, (ucd, str(e)), config, pos)
else:
vo_warn(W06, (ucd, str(e)), config, pos)
return False
return True
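# Hedged example: the UCD check only runs for VOTable 1.1 or later, so
# the config below is the minimal one that actually exercises it; the
# UCD string is a standard, well-formed one.
#
#   >>> check_ucd('pos.eq.ra;meta.main',
#   ...           config={'version_1_1_or_later': True})
#   True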
######################################################################
# PROPERTY MIXINS
class _IDProperty:
@property
def ID(self):
"""
The XML ID_ of the element. May be `None` or a string
conforming to XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
xmlutil.check_id(ID, 'ID', self._config, self._pos)
self._ID = ID
@ID.deleter
def ID(self):
self._ID = None
class _NameProperty:
@property
def name(self):
"""An optional name for the element."""
return self._name
@name.setter
def name(self, name):
xmlutil.check_token(name, 'name', self._config, self._pos)
self._name = name
@name.deleter
def name(self):
self._name = None
class _XtypeProperty:
@property
def xtype(self):
"""Extended data type information."""
return self._xtype
@xtype.setter
def xtype(self, xtype):
if xtype is not None and not self._config.get('version_1_2_or_later'):
warn_or_raise(
W28, W28, ('xtype', self._element_name, '1.2'),
self._config, self._pos)
check_string(xtype, 'xtype', self._config, self._pos)
self._xtype = xtype
@xtype.deleter
def xtype(self):
self._xtype = None
class _UtypeProperty:
_utype_in_v1_2 = False
@property
def utype(self):
"""The usage-specific or `unique type`_ of the element."""
return self._utype
@utype.setter
def utype(self, utype):
if (self._utype_in_v1_2 and
utype is not None and
not self._config.get('version_1_2_or_later')):
warn_or_raise(
W28, W28, ('utype', self._element_name, '1.2'),
self._config, self._pos)
check_string(utype, 'utype', self._config, self._pos)
self._utype = utype
@utype.deleter
def utype(self):
self._utype = None
class _UcdProperty:
_ucd_in_v1_2 = False
@property
def ucd(self):
"""The `unified content descriptor`_ for the element."""
return self._ucd
@ucd.setter
def ucd(self, ucd):
if ucd is not None and ucd.strip() == '':
ucd = None
if ucd is not None:
if (self._ucd_in_v1_2 and
not self._config.get('version_1_2_or_later')):
warn_or_raise(
W28, W28, ('ucd', self._element_name, '1.2'),
self._config, self._pos)
check_ucd(ucd, self._config, self._pos)
self._ucd = ucd
@ucd.deleter
def ucd(self):
self._ucd = None
class _DescriptionProperty:
@property
def description(self):
"""
An optional string describing the element. Corresponds to the
DESCRIPTION_ element.
"""
return self._description
@description.setter
def description(self, description):
self._description = description
@description.deleter
def description(self):
self._description = None
######################################################################
# ELEMENT CLASSES
class Element(metaclass=InheritDocstrings):
"""
A base class for all classes that represent XML elements in the
VOTABLE file.
"""
_element_name = ''
_attr_list = []
def _add_unknown_tag(self, iterator, tag, data, config, pos):
warn_or_raise(W10, W10, tag, config, pos)
def _ignore_add(self, iterator, tag, data, config, pos):
warn_unknown_attrs(tag, data.keys(), config, pos)
def _add_definitions(self, iterator, tag, data, config, pos):
if config.get('version_1_1_or_later'):
warn_or_raise(W22, W22, (), config, pos)
warn_unknown_attrs(tag, data.keys(), config, pos)
def parse(self, iterator, config):
"""
For internal use. Parse the XML content of the children of the
element.
Parameters
----------
iterator : xml iterator
An iterator over XML elements as returned by
`~astropy.utils.xml.iterparser.get_xml_iterator`.
config : dict
The configuration dictionary that affects how certain
elements are read.
Returns
-------
self : Element
Returns self as a convenience.
"""
raise NotImplementedError()
def to_xml(self, w, **kwargs):
"""
For internal use. Output the element to XML.
Parameters
----------
w : astropy.utils.xml.writer.XMLWriter object
An XML writer to write to.
kwargs : dict
Any configuration parameters to control the output.
"""
raise NotImplementedError()
class SimpleElement(Element):
"""
A base class for simple elements, such as FIELD, PARAM and INFO
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
Element.__init__(self)
def __repr__(self):
buff = io.StringIO()
SimpleElement.to_xml(self, XMLWriter(buff))
return buff.getvalue().strip()
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
break
return self
def to_xml(self, w, **kwargs):
w.element(self._element_name,
attrib=w.object_attrs(self, self._attr_list))
class SimpleElementWithContent(SimpleElement):
"""
A base class for simple elements with textual content, such as INFO,
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
SimpleElement.__init__(self)
self._content = None
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
if data:
self.content = data
break
return self
def to_xml(self, w, **kwargs):
w.element(self._element_name, self._content,
attrib=w.object_attrs(self, self._attr_list))
@property
def content(self):
"""The content of the element."""
return self._content
@content.setter
def content(self, content):
check_string(content, 'content', self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
class Link(SimpleElement, _IDProperty):
"""
LINK_ elements: used to reference external documents and servers through a URI.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ['ID', 'content_role', 'content_type', 'title', 'value',
'href', 'action']
_element_name = 'LINK'
def __init__(self, ID=None, title=None, value=None, href=None, action=None,
id=None, config=None, pos=None, **kwargs):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
content_role = kwargs.get('content-role') or kwargs.get('content_role')
content_type = kwargs.get('content-type') or kwargs.get('content_type')
if 'gref' in kwargs:
warn_or_raise(W11, W11, (), config, pos)
self.ID = resolve_id(ID, id, config, pos)
self.content_role = content_role
self.content_type = content_type
self.title = title
self.value = value
self.href = href
self.action = action
warn_unknown_attrs(
'LINK', kwargs.keys(), config, pos,
['content-role', 'content_role', 'content-type', 'content_type',
'gref'])
@property
def content_role(self):
"""
Defines the MIME role of the referenced object. Must be one of:
None, 'query', 'hints', 'doc', 'location' or 'type'
"""
return self._content_role
@content_role.setter
def content_role(self, content_role):
if ((content_role == 'type' and
not self._config['version_1_3_or_later']) or
content_role not in
(None, 'query', 'hints', 'doc', 'location', 'type')):
vo_warn(W45, (content_role,), self._config, self._pos)
self._content_role = content_role
@content_role.deleter
def content_role(self):
self._content_role = None
@property
def content_type(self):
"""Defines the MIME content type of the referenced object."""
return self._content_type
@content_type.setter
def content_type(self, content_type):
xmlutil.check_mime_content_type(content_type, self._config, self._pos)
self._content_type = content_type
@content_type.deleter
def content_type(self):
self._content_type = None
@property
def href(self):
"""
A URI to an arbitrary protocol. The vo package only supports
http and anonymous ftp.
"""
return self._href
@href.setter
def href(self, href):
xmlutil.check_anyuri(href, self._config, self._pos)
self._href = href
@href.deleter
def href(self):
self._href = None
def to_table_column(self, column):
meta = {}
for key in self._attr_list:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
column.meta.setdefault('links', [])
column.meta['links'].append(meta)
@classmethod
def from_table_column(cls, d):
return cls(**d)
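# Example (an illustrative sketch, not normative): a LINK built by hand
# serializes through ``to_xml``/``__repr__`` using the attributes listed
# in ``Link._attr_list`` above.
#     link = Link(title='NED', href='http://nedwww.ipac.caltech.edu/')
#     repr(link)   # -> something like '<LINK title="NED" href="..."/>'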
class Info(SimpleElementWithContent, _IDProperty, _XtypeProperty,
_UtypeProperty):
"""
INFO_ elements: arbitrary key-value pairs for extensions to the standard.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_element_name = 'INFO'
_attr_list_11 = ['ID', 'name', 'value']
_attr_list_12 = _attr_list_11 + ['xtype', 'ref', 'unit', 'ucd', 'utype']
_utype_in_v1_2 = True
def __init__(self, ID=None, name=None, value=None, id=None, xtype=None,
ref=None, unit=None, ucd=None, utype=None,
config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElementWithContent.__init__(self)
self.ID = (resolve_id(ID, id, config, pos) or
xmlutil.fix_id(name, config, pos))
self.name = name
self.value = value
self.xtype = xtype
self.ref = ref
self.unit = unit
self.ucd = ucd
self.utype = utype
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs('INFO', ['xtype'], config, pos)
if ref is not None:
warn_unknown_attrs('INFO', ['ref'], config, pos)
if unit is not None:
warn_unknown_attrs('INFO', ['unit'], config, pos)
if ucd is not None:
warn_unknown_attrs('INFO', ['ucd'], config, pos)
if utype is not None:
warn_unknown_attrs('INFO', ['utype'], config, pos)
warn_unknown_attrs('INFO', extra.keys(), config, pos)
@property
def name(self):
"""[*required*] The key of the key-value pair."""
return self._name
@name.setter
def name(self, name):
if name is None:
            warn_or_raise(W35, W35, ('name',), self._config, self._pos)
xmlutil.check_token(name, 'name', self._config, self._pos)
self._name = name
@property
def value(self):
"""
[*required*] The value of the key-value pair. (Always stored
as a string or unicode string).
"""
return self._value
@value.setter
def value(self, value):
if value is None:
            warn_or_raise(W35, W35, ('value',), self._config, self._pos)
check_string(value, 'value', self._config, self._pos)
self._value = value
@property
def content(self):
"""The content inside the INFO element."""
return self._content
@content.setter
def content(self, content):
check_string(content, 'content', self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
@property
def ref(self):
"""
Refer to another INFO_ element by ID_, defined previously in
the document.
"""
return self._ref
@ref.setter
def ref(self, ref):
if ref is not None and not self._config.get('version_1_2_or_later'):
warn_or_raise(W28, W28, ('ref', 'INFO', '1.2'),
self._config, self._pos)
xmlutil.check_id(ref, 'ref', self._config, self._pos)
# TODO: actually apply the reference
# if ref is not None:
# try:
# other = self._votable.get_values_by_id(ref, before=self)
# except KeyError:
# vo_raise(
# "VALUES ref='%s', which has not already been defined." %
# self.ref, self._config, self._pos, KeyError)
# self.null = other.null
# self.type = other.type
# self.min = other.min
# self.min_inclusive = other.min_inclusive
# self.max = other.max
# self.max_inclusive = other.max_inclusive
# self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the INFO_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from ... import units as u
if not self._config.get('version_1_2_or_later'):
warn_or_raise(W28, W28, ('unit', 'INFO', '1.2'),
self._config, self._pos)
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(
unit, format=default_format, parse_strict='silent')
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,),
self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(
unit, format=format, parse_strict='silent')
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if 'unit' in attrib:
attrib['unit'] = self.unit.to_string('cds')
w.element(self._element_name, self._content,
attrib=attrib)
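# Example (illustrative sketch): INFO elements carry simple key-value
# pairs, e.g. the common DAL-style status report; an optional text body
# goes through the ``content`` property.
#     info = Info(name='QUERY_STATUS', value='OK')
#     info.content = 'Query completed without errors'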
class Values(Element, _IDProperty):
"""
VALUES_ element: used within FIELD_ and PARAM_ elements to define the domain of values.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, votable, field, ID=None, null=None, ref=None,
type="legal", id=None, config=None, pos=None, **extras):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._votable = votable
self._field = field
self.ID = resolve_id(ID, id, config, pos)
self.null = null
self._ref = ref
self.type = type
self.min = None
self.max = None
self.min_inclusive = True
self.max_inclusive = True
self._options = []
warn_unknown_attrs('VALUES', extras.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
self.to_xml(XMLWriter(buff))
return buff.getvalue().strip()
@property
def null(self):
"""
For integral datatypes, *null* is used to define the value
used for missing values.
"""
return self._null
@null.setter
def null(self, null):
if null is not None and isinstance(null, str):
try:
null_val = self._field.converter.parse_scalar(
null, self._config, self._pos)[0]
except Exception:
warn_or_raise(W36, W36, null, self._config, self._pos)
null_val = self._field.converter.parse_scalar(
'0', self._config, self._pos)[0]
else:
null_val = null
self._null = null_val
@null.deleter
def null(self):
self._null = None
@property
def type(self):
"""
[*required*] Defines the applicability of the domain defined
by this VALUES_ element. Must be one of the following
strings:
- 'legal': The domain of this column applies in general to
this datatype. (default)
- 'actual': The domain of this column applies only to the
data enclosed in the parent table.
"""
return self._type
@type.setter
def type(self, type):
if type not in ('legal', 'actual'):
vo_raise(E08, type, self._config, self._pos)
self._type = type
@property
def ref(self):
"""
Refer to another VALUES_ element by ID_, defined previously in
the document, for MIN/MAX/OPTION information.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
if ref is not None:
try:
other = self._votable.get_values_by_id(ref, before=self)
except KeyError:
warn_or_raise(W43, W43, ('VALUES', self.ref), self._config,
self._pos)
ref = None
else:
self.null = other.null
self.type = other.type
self.min = other.min
self.min_inclusive = other.min_inclusive
self.max = other.max
self.max_inclusive = other.max_inclusive
self._options[:] = other.options
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def min(self):
"""
The minimum value of the domain. See :attr:`min_inclusive`.
"""
return self._min
@min.setter
def min(self, min):
if hasattr(self._field, 'converter') and min is not None:
self._min = self._field.converter.parse(min)[0]
else:
self._min = min
@min.deleter
def min(self):
self._min = None
@property
def min_inclusive(self):
"""When `True`, the domain includes the minimum value."""
return self._min_inclusive
@min_inclusive.setter
def min_inclusive(self, inclusive):
if inclusive == 'yes':
self._min_inclusive = True
elif inclusive == 'no':
self._min_inclusive = False
else:
self._min_inclusive = bool(inclusive)
@min_inclusive.deleter
def min_inclusive(self):
self._min_inclusive = True
@property
def max(self):
"""
The maximum value of the domain. See :attr:`max_inclusive`.
"""
return self._max
@max.setter
def max(self, max):
if hasattr(self._field, 'converter') and max is not None:
self._max = self._field.converter.parse(max)[0]
else:
self._max = max
@max.deleter
def max(self):
self._max = None
@property
def max_inclusive(self):
"""When `True`, the domain includes the maximum value."""
return self._max_inclusive
@max_inclusive.setter
def max_inclusive(self, inclusive):
if inclusive == 'yes':
self._max_inclusive = True
elif inclusive == 'no':
self._max_inclusive = False
else:
self._max_inclusive = bool(inclusive)
@max_inclusive.deleter
def max_inclusive(self):
self._max_inclusive = True
@property
def options(self):
"""
A list of string key-value tuples defining other OPTION
elements for the domain. All options are ignored -- they are
stored for round-tripping purposes only.
"""
return self._options
def parse(self, iterator, config):
if self.ref is not None:
for start, tag, data, pos in iterator:
if start:
warn_or_raise(W44, W44, tag, config, pos)
else:
if tag != 'VALUES':
warn_or_raise(W44, W44, tag, config, pos)
break
else:
for start, tag, data, pos in iterator:
if start:
if tag == 'MIN':
if 'value' not in data:
vo_raise(E09, 'MIN', config, pos)
self.min = data['value']
self.min_inclusive = data.get('inclusive', 'yes')
warn_unknown_attrs(
'MIN', data.keys(), config, pos,
['value', 'inclusive'])
elif tag == 'MAX':
if 'value' not in data:
vo_raise(E09, 'MAX', config, pos)
self.max = data['value']
self.max_inclusive = data.get('inclusive', 'yes')
warn_unknown_attrs(
'MAX', data.keys(), config, pos,
['value', 'inclusive'])
elif tag == 'OPTION':
if 'value' not in data:
vo_raise(E09, 'OPTION', config, pos)
xmlutil.check_token(
data.get('name'), 'name', config, pos)
self.options.append(
(data.get('name'), data.get('value')))
warn_unknown_attrs(
'OPTION', data.keys(), config, pos,
                            ['value', 'name'])
elif tag == 'VALUES':
break
return self
def is_defaults(self):
"""
        Are the settings on this ``VALUES`` element all the same as the
XML defaults?
"""
# If there's nothing meaningful or non-default to write,
# don't write anything.
return (self.ref is None and self.null is None and self.ID is None and
self.max is None and self.min is None and self.options == [])
def to_xml(self, w, **kwargs):
def yes_no(value):
if value:
return 'yes'
return 'no'
if self.is_defaults():
return
if self.ref is not None:
w.element('VALUES', attrib=w.object_attrs(self, ['ref']))
else:
with w.tag('VALUES',
attrib=w.object_attrs(
self, ['ID', 'null', 'ref'])):
if self.min is not None:
w.element(
'MIN',
value=self._field.converter.output(self.min, False),
inclusive=yes_no(self.min_inclusive))
if self.max is not None:
w.element(
'MAX',
value=self._field.converter.output(self.max, False),
inclusive=yes_no(self.max_inclusive))
for name, value in self.options:
w.element(
'OPTION',
name=name,
value=value)
def to_table_column(self, column):
# Have the ref filled in here
meta = {}
for key in ['ID', 'null']:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if self.min is not None:
meta['min'] = {
'value': self.min,
'inclusive': self.min_inclusive}
if self.max is not None:
meta['max'] = {
'value': self.max,
'inclusive': self.max_inclusive}
if len(self.options):
meta['options'] = dict(self.options)
column.meta['values'] = meta
def from_table_column(self, column):
if column.info.meta is None or 'values' not in column.info.meta:
return
meta = column.info.meta['values']
for key in ['ID', 'null']:
val = meta.get(key, None)
if val is not None:
setattr(self, key, val)
if 'min' in meta:
self.min = meta['min']['value']
self.min_inclusive = meta['min']['inclusive']
if 'max' in meta:
self.max = meta['max']['value']
self.max_inclusive = meta['max']['inclusive']
if 'options' in meta:
self._options = list(meta['options'].items())
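# Example (illustrative sketch): declaring the legal domain of a
# declination column through its Values instance.  ``field`` is assumed
# to be a Field obtained from a parsed table.
#     field.values.min = -90
#     field.values.max = 90
#     field.values.null = -999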
class Field(SimpleElement, _IDProperty, _NameProperty, _XtypeProperty,
_UtypeProperty, _UcdProperty):
"""
FIELD_ element: describes the datatype of a particular column of data.
The keyword arguments correspond to setting members of the same
name, documented below.
If *ID* is provided, it is used for the column name in the
resulting recarray of the table. If no *ID* is provided, *name*
is used instead. If neither is provided, an exception will be
raised.
"""
_attr_list_11 = ['ID', 'name', 'datatype', 'arraysize', 'ucd',
'unit', 'width', 'precision', 'utype', 'ref']
_attr_list_12 = _attr_list_11 + ['xtype']
_element_name = 'FIELD'
def __init__(self, votable, ID=None, name=None, datatype=None,
arraysize=None, ucd=None, unit=None, width=None,
precision=None, utype=None, ref=None, type=None, id=None,
xtype=None,
config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs(self._element_name, ['xtype'], config, pos)
# TODO: REMOVE ME ----------------------------------------
# This is a terrible hack to support Simple Image Access
# Protocol results from archive.noao.edu. It creates a field
# for the coordinate projection type of type "double", which
# actually contains character data. We have to hack the field
# to store character data, or we can't read it in. A warning
# will be raised when this happens.
if (not config.get('pedantic') and name == 'cprojection' and
ID == 'cprojection' and ucd == 'VOX:WCS_CoordProjection' and
datatype == 'double'):
datatype = 'char'
arraysize = '3'
vo_warn(W40, (), config, pos)
# ----------------------------------------
self.description = None
self._votable = votable
self.ID = (resolve_id(ID, id, config, pos) or
xmlutil.fix_id(name, config, pos))
self.name = name
if name is None:
if (self._element_name == 'PARAM' and
not config.get('version_1_1_or_later')):
pass
else:
warn_or_raise(W15, W15, self._element_name, config, pos)
self.name = self.ID
if self._ID is None and name is None:
vo_raise(W12, self._element_name, config, pos)
datatype_mapping = {
'string': 'char',
'unicodeString': 'unicodeChar',
'int16': 'short',
'int32': 'int',
'int64': 'long',
'float32': 'float',
'float64': 'double',
# The following appear in some Vizier tables
'unsignedInt': 'long',
'unsignedShort': 'int'
}
datatype_mapping.update(config.get('datatype_mapping', {}))
if datatype in datatype_mapping:
warn_or_raise(W13, W13, (datatype, datatype_mapping[datatype]),
config, pos)
datatype = datatype_mapping[datatype]
self.ref = ref
self.datatype = datatype
self.arraysize = arraysize
self.ucd = ucd
self.unit = unit
self.width = width
self.precision = precision
self.utype = utype
self.type = type
self._links = HomogeneousList(Link)
self.title = self.name
self.values = Values(self._votable, self)
self.xtype = xtype
self._setup(config, pos)
warn_unknown_attrs(self._element_name, extra.keys(), config, pos)
@classmethod
def uniqify_names(cls, fields):
"""
Make sure that all names and titles in a list of fields are
unique, by appending numbers if necessary.
"""
unique = {}
for field in fields:
i = 2
new_id = field.ID
while new_id in unique:
new_id = field.ID + "_{:d}".format(i)
i += 1
if new_id != field.ID:
vo_warn(W32, (field.ID, new_id), field._config, field._pos)
field.ID = new_id
unique[new_id] = field.ID
for field in fields:
i = 2
if field.name is None:
new_name = field.ID
implicit = True
else:
new_name = field.name
implicit = False
if new_name != field.ID:
while new_name in unique:
new_name = field.name + " {:d}".format(i)
i += 1
if (not implicit and
new_name != field.name):
vo_warn(W33, (field.name, new_name), field._config, field._pos)
field._unique_name = new_name
unique[new_name] = field.name
def _setup(self, config, pos):
if self.values._ref is not None:
self.values.ref = self.values._ref
self.converter = converters.get_converter(self, config, pos)
@property
def datatype(self):
"""
[*required*] The datatype of the column. Valid values (as
defined by the spec) are:
'boolean', 'bit', 'unsignedByte', 'short', 'int', 'long',
'char', 'unicodeChar', 'float', 'double', 'floatComplex', or
'doubleComplex'
Many VOTABLE files in the wild use 'string' instead of 'char',
so that is also a valid option, though 'string' will always be
converted to 'char' when writing the file back out.
"""
return self._datatype
@datatype.setter
def datatype(self, datatype):
if datatype is None:
if self._config.get('version_1_1_or_later'):
warn_or_raise(E10, E10, self._element_name, self._config,
self._pos)
datatype = 'char'
if datatype not in converters.converter_mapping:
vo_raise(E06, (datatype, self.ID), self._config, self._pos)
self._datatype = datatype
@property
def precision(self):
"""
Along with :attr:`width`, defines the `numerical accuracy`_
associated with the data. These values are used to limit the
precision when writing floating point values back to the XML
file. Otherwise, it is purely informational -- the Numpy
recarray containing the data itself does not use this
information.
"""
return self._precision
@precision.setter
def precision(self, precision):
if precision is not None and not re.match(r"^[FE]?[0-9]+$", precision):
vo_raise(E11, precision, self._config, self._pos)
self._precision = precision
@precision.deleter
def precision(self):
self._precision = None
@property
def width(self):
"""
Along with :attr:`precision`, defines the `numerical
accuracy`_ associated with the data. These values are used to
limit the precision when writing floating point values back to
the XML file. Otherwise, it is purely informational -- the
Numpy recarray containing the data itself does not use this
information.
"""
return self._width
@width.setter
def width(self, width):
if width is not None:
width = int(width)
if width <= 0:
vo_raise(E12, width, self._config, self._pos)
self._width = width
@width.deleter
def width(self):
self._width = None
# ref on FIELD and PARAM behave differently than elsewhere -- here
# they're just informational, such as to refer to a coordinate
# system.
@property
def ref(self):
"""
On FIELD_ elements, ref is used only for informational
purposes, for example to refer to a COOSYS_ element.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the FIELD_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from ... import units as u
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(
unit, format=default_format, parse_strict='silent')
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,),
self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(
unit, format=format, parse_strict='silent')
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
@property
def arraysize(self):
"""
Specifies the size of the multidimensional array if this
FIELD_ contains more than a single value.
See `multidimensional arrays`_.
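        Illustrative examples of valid values: ``"3"`` (a fixed-length
        1-D array of 3 elements), ``"2x3"`` (a 2x3 array), ``"*"``
        (variable length) and ``"10*"`` (variable length, at most 10
        elements).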
"""
return self._arraysize
@arraysize.setter
def arraysize(self, arraysize):
if (arraysize is not None and
not re.match(r"^([0-9]+x)*[0-9]*[*]?(s\W)?$", arraysize)):
vo_raise(E13, arraysize, self._config, self._pos)
self._arraysize = arraysize
@arraysize.deleter
def arraysize(self):
self._arraysize = None
@property
def type(self):
"""
The type attribute on FIELD_ elements is reserved for future
extensions.
"""
return self._type
@type.setter
def type(self, type):
self._type = type
@type.deleter
def type(self):
self._type = None
@property
def values(self):
"""
A :class:`Values` instance (or `None`) defining the domain
of the column.
"""
return self._values
@values.setter
def values(self, values):
assert values is None or isinstance(values, Values)
self._values = values
@values.deleter
def values(self):
self._values = None
@property
def links(self):
"""
A list of :class:`Link` instances used to reference more
details about the meaning of the FIELD_. This is purely
informational and is not used by the `astropy.io.votable`
package.
"""
return self._links
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start:
if tag == 'VALUES':
self.values.__init__(
self._votable, self, config=config, pos=pos, **data)
self.values.parse(iterator, config)
elif tag == 'LINK':
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
elif tag == 'DESCRIPTION':
warn_unknown_attrs(
'DESCRIPTION', data.keys(), config, pos)
elif tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
else:
if tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(
W17, W17, self._element_name, config, pos)
self.description = data or None
elif tag == self._element_name:
break
if self.description is not None:
self.title = " ".join(x.strip() for x in
self.description.splitlines())
else:
self.title = self.name
self._setup(config, pos)
return self
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if 'unit' in attrib:
attrib['unit'] = self.unit.to_string('cds')
with w.tag(self._element_name, attrib=attrib):
if self.description is not None:
w.element('DESCRIPTION', self.description, wrap=True)
if not self.values.is_defaults():
self.values.to_xml(w, **kwargs)
for link in self.links:
link.to_xml(w, **kwargs)
def to_table_column(self, column):
"""
Sets the attributes of a given `astropy.table.Column` instance
to match the information in this `Field`.
"""
for key in ['ucd', 'width', 'precision', 'utype', 'xtype']:
val = getattr(self, key, None)
if val is not None:
column.meta[key] = val
if not self.values.is_defaults():
self.values.to_table_column(column)
for link in self.links:
link.to_table_column(column)
if self.description is not None:
column.description = self.description
if self.unit is not None:
# TODO: Use units framework when it's available
column.unit = self.unit
if isinstance(self.converter, converters.FloatingPoint):
column.format = self.converter.output_format
@classmethod
def from_table_column(cls, votable, column):
"""
Restores a `Field` instance from a given
`astropy.table.Column` instance.
"""
kwargs = {}
meta = column.info.meta
if meta:
for key in ['ucd', 'width', 'precision', 'utype', 'xtype']:
val = meta.get(key, None)
if val is not None:
kwargs[key] = val
# TODO: Use the unit framework when available
if column.info.unit is not None:
kwargs['unit'] = column.info.unit
kwargs['name'] = column.info.name
result = converters.table_column_to_votable_datatype(column)
kwargs.update(result)
field = cls(votable, **kwargs)
if column.info.description is not None:
field.description = column.info.description
field.values.from_table_column(column)
if meta and 'links' in meta:
for link in meta['links']:
field.links.append(Link.from_table_column(link))
# TODO: Parse format into precision and width
return field
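# Example (an illustrative sketch, adapted from the astropy
# documentation): FIELD definitions drive the dtype of the table's
# record array, so a table is typically built by appending Field
# objects and then calling ``create_arrays``.
#     votable = VOTableFile()
#     resource = Resource()
#     votable.resources.append(resource)
#     table = Table(votable)
#     resource.tables.append(table)
#     table.fields.extend([
#         Field(votable, name='filename', datatype='char', arraysize='*'),
#         Field(votable, name='matrix', datatype='double', arraysize='2x2')])
#     table.create_arrays(2)
#     table.array[0] = ('test1.xml', [[1, 0], [0, 1]])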
class Param(Field):
"""
PARAM_ element: constant-valued columns in the data.
:class:`Param` objects are a subclass of :class:`Field`, and have
all of its methods and members. Additionally, it defines :attr:`value`.
"""
_attr_list_11 = Field._attr_list_11 + ['value']
_attr_list_12 = Field._attr_list_12 + ['value']
_element_name = 'PARAM'
def __init__(self, votable, ID=None, name=None, value=None, datatype=None,
arraysize=None, ucd=None, unit=None, width=None,
precision=None, utype=None, type=None, id=None, config=None,
pos=None, **extra):
self._value = value
Field.__init__(self, votable, ID=ID, name=name, datatype=datatype,
arraysize=arraysize, ucd=ucd, unit=unit,
precision=precision, utype=utype, type=type,
id=id, config=config, pos=pos, **extra)
@property
def value(self):
"""
[*required*] The constant value of the parameter. Its type is
determined by the :attr:`~Field.datatype` member.
"""
return self._value
@value.setter
def value(self, value):
if value is None:
value = ""
if isinstance(value, str):
self._value = self.converter.parse(
value, self._config, self._pos)[0]
else:
self._value = value
def _setup(self, config, pos):
Field._setup(self, config, pos)
self.value = self._value
def to_xml(self, w, **kwargs):
tmp_value = self._value
self._value = self.converter.output(tmp_value, False)
# We must always have a value
if self._value is None:
self._value = ""
Field.to_xml(self, w, **kwargs)
self._value = tmp_value
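# Example (illustrative sketch): a PARAM is a constant-valued column;
# its ``value`` string is parsed according to ``datatype``.
#     param = Param(votable, name='exposure', datatype='float',
#                   value='1800', unit='s')
#     param.value   # -> 1800.0, parsed by the float converter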
class CooSys(SimpleElement):
"""
COOSYS_ element: defines a coordinate system.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
_attr_list = ['ID', 'equinox', 'epoch', 'system']
_element_name = 'COOSYS'
def __init__(self, ID=None, equinox=None, epoch=None, system=None, id=None,
config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
if config.get('version_1_2_or_later'):
warn_or_raise(W27, W27, (), config, pos)
SimpleElement.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.equinox = equinox
self.epoch = epoch
self.system = system
warn_unknown_attrs('COOSYS', extra.keys(), config, pos)
@property
def ID(self):
"""
[*required*] The XML ID of the COOSYS_ element, used for
cross-referencing. May be `None` or a string conforming to
XML ID_ syntax.
"""
return self._ID
@ID.setter
def ID(self, ID):
if self._config.get('version_1_1_or_later'):
if ID is None:
vo_raise(E15, (), self._config, self._pos)
xmlutil.check_id(ID, 'ID', self._config, self._pos)
self._ID = ID
@property
def system(self):
"""
Specifies the type of coordinate system. Valid choices are:
'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',
'supergalactic', 'xy', 'barycentric', or 'geo_app'
"""
return self._system
@system.setter
def system(self, system):
if system not in ('eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5',
'galactic', 'supergalactic', 'xy', 'barycentric',
'geo_app'):
warn_or_raise(E16, E16, system, self._config, self._pos)
self._system = system
@system.deleter
def system(self):
self._system = None
@property
def equinox(self):
"""
        A parameter required to fix the equatorial or ecliptic systems
        (e.g. "J2000" for the default system "eq_FK5", or "B1950" for
        the default system "eq_FK4").
"""
return self._equinox
@equinox.setter
def equinox(self, equinox):
check_astroyear(equinox, 'equinox', self._config, self._pos)
self._equinox = equinox
@equinox.deleter
def equinox(self):
self._equinox = None
@property
def epoch(self):
"""
Specifies the epoch of the positions. It must be a string
specifying an astronomical year.
"""
return self._epoch
@epoch.setter
def epoch(self, epoch):
check_astroyear(epoch, 'epoch', self._config, self._pos)
self._epoch = epoch
@epoch.deleter
def epoch(self):
self._epoch = None
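# Example (illustrative sketch): an FK5 frame at equinox J2000.
#     coosys = CooSys(ID='J2000', equinox='J2000', system='eq_FK5')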
class FieldRef(SimpleElement, _UtypeProperty, _UcdProperty):
"""
FIELDref_ element: used inside of GROUP_ elements to refer to remote FIELD_ elements.
"""
_attr_list_11 = ['ref']
_attr_list_12 = _attr_list_11 + ['ucd', 'utype']
_element_name = "FIELDref"
_utype_in_v1_2 = True
_ucd_in_v1_2 = True
def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None,
**extra):
"""
*table* is the :class:`Table` object that this :class:`FieldRef`
is a member of.
*ref* is the ID to reference a :class:`Field` object defined
elsewhere.
"""
if config is None:
config = {}
self._config = config
self._pos = pos
SimpleElement.__init__(self)
self._table = table
self.ref = ref
self.ucd = ucd
self.utype = utype
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if ucd is not None:
warn_unknown_attrs(self._element_name, ['ucd'], config, pos)
if utype is not None:
warn_unknown_attrs(self._element_name, ['utype'], config, pos)
@property
def ref(self):
"""The ID_ of the FIELD_ that this FIELDref_ references."""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
def get_ref(self):
"""
Lookup the :class:`Field` instance that this :class:`FieldRef`
references.
"""
for field in self._table._votable.iter_fields_and_params():
if isinstance(field, Field) and field.ID == self.ref:
return field
vo_raise(
"No field named '{}'".format(self.ref),
self._config, self._pos, KeyError)
class ParamRef(SimpleElement, _UtypeProperty, _UcdProperty):
"""
PARAMref_ element: used inside of GROUP_ elements to refer to remote PARAM_ elements.
The keyword arguments correspond to setting members of the same
name, documented below.
It contains the following publicly-accessible members:
*ref*: An XML ID referring to a <PARAM> element.
"""
_attr_list_11 = ['ref']
_attr_list_12 = _attr_list_11 + ['ucd', 'utype']
_element_name = "PARAMref"
_utype_in_v1_2 = True
_ucd_in_v1_2 = True
def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None):
if config is None:
config = {}
self._config = config
self._pos = pos
        SimpleElement.__init__(self)
self._table = table
self.ref = ref
self.ucd = ucd
self.utype = utype
if config.get('version_1_2_or_later'):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if ucd is not None:
warn_unknown_attrs(self._element_name, ['ucd'], config, pos)
if utype is not None:
warn_unknown_attrs(self._element_name, ['utype'], config, pos)
@property
def ref(self):
"""The ID_ of the PARAM_ that this PARAMref_ references."""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
def get_ref(self):
"""
        Lookup the :class:`Param` instance that this :class:`ParamRef`
references.
"""
for param in self._table._votable.iter_fields_and_params():
if isinstance(param, Param) and param.ID == self.ref:
return param
vo_raise(
"No params named '{}'".format(self.ref),
self._config, self._pos, KeyError)
class Group(Element, _IDProperty, _NameProperty, _UtypeProperty,
_UcdProperty, _DescriptionProperty):
"""
GROUP_ element: groups FIELD_ and PARAM_ elements.
This information is currently ignored by the vo package---that is
the columns in the recarray are always flat---but the grouping
information is stored so that it can be written out again to the
XML file.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, table, ID=None, name=None, ref=None, ucd=None,
utype=None, id=None, config=None, pos=None, **extra):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self._table = table
self.ID = (resolve_id(ID, id, config, pos)
or xmlutil.fix_id(name, config, pos))
self.name = name
self.ref = ref
self.ucd = ucd
self.utype = utype
self.description = None
self._entries = HomogeneousList(
(FieldRef, ParamRef, Group, Param))
warn_unknown_attrs('GROUP', extra.keys(), config, pos)
def __repr__(self):
return '<GROUP>... {0} entries ...</GROUP>'.format(len(self._entries))
@property
def ref(self):
"""
Currently ignored, as it's not clear from the spec how this is
meant to work.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, 'ref', self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def entries(self):
"""
[read-only] A list of members of the GROUP_. This list may
only contain objects of type :class:`Param`, :class:`Group`,
:class:`ParamRef` and :class:`FieldRef`.
"""
return self._entries
def _add_fieldref(self, iterator, tag, data, config, pos):
fieldref = FieldRef(self._table, config=config, pos=pos, **data)
self.entries.append(fieldref)
def _add_paramref(self, iterator, tag, data, config, pos):
paramref = ParamRef(self._table, config=config, pos=pos, **data)
self.entries.append(paramref)
def _add_param(self, iterator, tag, data, config, pos):
if isinstance(self._table, VOTableFile):
votable = self._table
else:
votable = self._table._votable
param = Param(votable, config=config, pos=pos, **data)
self.entries.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self._table, config=config, pos=pos, **data)
self.entries.append(group)
group.parse(iterator, config)
def parse(self, iterator, config):
tag_mapping = {
'FIELDref': self._add_fieldref,
'PARAMref': self._add_paramref,
'PARAM': self._add_param,
'GROUP': self._add_group,
'DESCRIPTION': self._ignore_add}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
else:
if tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(W17, W17, 'GROUP', config, pos)
self.description = data or None
elif tag == 'GROUP':
break
return self
def to_xml(self, w, **kwargs):
with w.tag(
'GROUP',
attrib=w.object_attrs(
self, ['ID', 'name', 'ref', 'ucd', 'utype'])):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for entry in self.entries:
entry.to_xml(w, **kwargs)
def iter_fields_and_params(self):
"""
Recursively iterate over all :class:`Param` elements in this
:class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Param):
yield entry
elif isinstance(entry, Group):
for field in entry.iter_fields_and_params():
yield field
def iter_groups(self):
"""
Recursively iterate over all sub-:class:`Group` instances in
this :class:`Group`.
"""
for entry in self.entries:
if isinstance(entry, Group):
yield entry
for group in entry.iter_groups():
yield group
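# Example (illustrative sketch): grouping two existing columns by
# reference.  ``table`` is assumed to contain FIELDs with IDs 'ra' and
# 'dec'.
#     group = Group(table, name='pos')
#     group.entries.append(FieldRef(table, 'ra'))
#     group.entries.append(FieldRef(table, 'dec'))
#     table.groups.append(group)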
class Table(Element, _IDProperty, _NameProperty, _UcdProperty,
_DescriptionProperty):
"""
TABLE_ element: optionally contains data.
It contains the following publicly-accessible and mutable
attribute:
*array*: A Numpy masked array of the data itself, where each
row is a row of votable data, and columns are named and typed
based on the <FIELD> elements of the table. The mask is
parallel to the data array, except for variable-length fields.
For those fields, the numpy array's column type is "object"
(``"O"``), and another masked array is stored there.
        If the Table contains no data (for example, its enclosing
        :class:`Resource` has :attr:`~Resource.type` == 'meta'), *array*
        will have zero length.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, votable, ID=None, name=None, ref=None, ucd=None,
utype=None, nrows=None, id=None, config=None, pos=None,
**extra):
if config is None:
config = {}
self._config = config
self._pos = pos
self._empty = False
Element.__init__(self)
self._votable = votable
self.ID = (resolve_id(ID, id, config, pos)
or xmlutil.fix_id(name, config, pos))
self.name = name
xmlutil.check_id(ref, 'ref', config, pos)
self._ref = ref
self.ucd = ucd
self.utype = utype
if nrows is not None:
nrows = int(nrows)
if nrows < 0:
raise ValueError("'nrows' cannot be negative.")
self._nrows = nrows
self.description = None
self.format = 'tabledata'
self._fields = HomogeneousList(Field)
self._params = HomogeneousList(Param)
self._groups = HomogeneousList(Group)
self._links = HomogeneousList(Link)
self._infos = HomogeneousList(Info)
self.array = ma.array([])
warn_unknown_attrs('TABLE', extra.keys(), config, pos)
def __repr__(self):
return repr(self.to_table())
def __bytes__(self):
return bytes(self.to_table())
def __str__(self):
return str(self.to_table())
@property
def ref(self):
return self._ref
@ref.setter
def ref(self, ref):
"""
Refer to another TABLE, previously defined, by the *ref* ID_
for all metadata (FIELD_, PARAM_ etc.) information.
"""
# When the ref changes, we want to verify that it will work
# by actually going and looking for the referenced table.
# If found, set a bunch of properties in this table based
# on the other one.
xmlutil.check_id(ref, 'ref', self._config, self._pos)
if ref is not None:
try:
table = self._votable.get_table_by_id(ref, before=self)
except KeyError:
warn_or_raise(
W43, W43, ('TABLE', self.ref), self._config, self._pos)
ref = None
else:
self._fields = table.fields
self._params = table.params
self._groups = table.groups
self._links = table.links
else:
del self._fields[:]
del self._params[:]
del self._groups[:]
del self._links[:]
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def format(self):
"""
[*required*] The serialization format of the table. Must be
one of:
          'tabledata' (TABLEDATA_), 'binary' (BINARY_), 'binary2'
          (BINARY2_), or 'fits' (FITS_).
Note that the 'fits' format, since it requires an external
file, can not be written out. Any file read in with 'fits'
format will be read out, by default, in 'tabledata' format.
See :ref:`votable-serialization`.
"""
return self._format
@format.setter
def format(self, format):
format = format.lower()
if format == 'fits':
vo_raise("fits format can not be written out, only read.",
self._config, self._pos, NotImplementedError)
if format == 'binary2':
if not self._config['version_1_3_or_later']:
vo_raise(
"binary2 only supported in votable 1.3 or later",
self._config, self._pos)
elif format not in ('tabledata', 'binary'):
vo_raise("Invalid format '{}'".format(format),
self._config, self._pos)
self._format = format
@property
def nrows(self):
"""
[*immutable*] The number of rows in the table, as specified in
the XML file.
"""
return self._nrows
@property
def fields(self):
"""
A list of :class:`Field` objects describing the types of each
of the data columns.
"""
return self._fields
@property
def params(self):
"""
A list of parameters (constant-valued columns) for the
table. Must contain only :class:`Param` objects.
"""
return self._params
@property
def groups(self):
"""
A list of :class:`Group` objects describing how the columns
and parameters are grouped. Currently this information is
only kept around for round-tripping and informational
purposes.
"""
return self._groups
@property
def links(self):
"""
A list of :class:`Link` objects (pointers to other documents
or servers through a URI) for the table.
"""
return self._links
@property
def infos(self):
"""
A list of :class:`Info` objects for the table. Allows for
post-operational diagnostics.
"""
return self._infos
def is_empty(self):
"""
Returns True if this table doesn't contain any real data
because it was skipped over by the parser (through use of the
``table_number`` kwarg).
"""
return self._empty
def create_arrays(self, nrows=0, config=None):
"""
        Create a new array to hold the data based on the current set
        of fields, and store it in the *array* member variable.
Any data in the existing array will be lost.
*nrows*, if provided, is the number of rows to allocate.
"""
if nrows is None:
nrows = 0
fields = self.fields
if len(fields) == 0:
array = np.recarray((nrows,), dtype='O')
mask = np.zeros((nrows,), dtype='b')
else:
# for field in fields: field._setup(config)
Field.uniqify_names(fields)
dtype = []
for x in fields:
if x._unique_name == x.ID:
id = x.ID
else:
id = (x._unique_name, x.ID)
dtype.append((id, x.converter.format))
array = np.recarray((nrows,), dtype=np.dtype(dtype))
descr_mask = []
for d in array.dtype.descr:
                new_type = 'O' if d[1][1] == 'O' else 'bool'
if len(d) == 2:
descr_mask.append((d[0], new_type))
elif len(d) == 3:
descr_mask.append((d[0], new_type, d[2]))
mask = np.zeros((nrows,), dtype=descr_mask)
self.array = ma.array(array, mask=mask)
def _resize_strategy(self, size):
"""
Return a new (larger) size based on size, used for
reallocating an array when it fills up. This is in its own
function so the resizing strategy can be easily replaced.
"""
# Once we go beyond 0, make a big step -- after that use a
# factor of 1.5 to help keep memory usage compact
if size == 0:
return 512
return int(np.ceil(size * RESIZE_AMOUNT))
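    # For example, with RESIZE_AMOUNT = 1.5 the allocation sequence is
    # 0 -> 512 -> 768 -> 1152 -> 1728 -> ..., i.e. a single large initial
    # jump and then ~50% growth per reallocation (amortized O(1) appends).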
def _add_field(self, iterator, tag, data, config, pos):
field = Field(self._votable, config=config, pos=pos, **data)
self.fields.append(field)
field.parse(iterator, config)
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self._votable, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _add_link(self, iterator, tag, data, config, pos):
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
if not config.get('version_1_2_or_later'):
warn_or_raise(W26, W26, ('INFO', 'TABLE', '1.2'), config, pos)
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def parse(self, iterator, config):
columns = config.get('columns')
# If we've requested to read in only a specific table, skip
# all others
table_number = config.get('table_number')
current_table_number = config.get('_current_table_number')
skip_table = False
if current_table_number is not None:
config['_current_table_number'] += 1
if (table_number is not None and
table_number != current_table_number):
skip_table = True
self._empty = True
table_id = config.get('table_id')
if table_id is not None:
if table_id != self.ID:
skip_table = True
self._empty = True
if self.ref is not None:
# This table doesn't have its own datatype descriptors, it
# just references those from another table.
# This is to call the property setter to go and get the
# referenced information
self.ref = self.ref
for start, tag, data, pos in iterator:
if start:
if tag == 'DATA':
warn_unknown_attrs(
'DATA', data.keys(), config, pos)
break
else:
if tag == 'TABLE':
return self
elif tag == 'DESCRIPTION':
if self.description is not None:
                        warn_or_raise(W17, W17, 'TABLE', config, pos)
self.description = data or None
else:
tag_mapping = {
'FIELD': self._add_field,
'PARAM': self._add_param,
'GROUP': self._add_group,
'LINK': self._add_link,
'INFO': self._add_info,
'DESCRIPTION': self._ignore_add}
for start, tag, data, pos in iterator:
if start:
if tag == 'DATA':
warn_unknown_attrs(
'DATA', data.keys(), config, pos)
break
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
else:
if tag == 'DESCRIPTION':
if self.description is not None:
                            warn_or_raise(W17, W17, 'TABLE', config, pos)
self.description = data or None
elif tag == 'TABLE':
# For error checking purposes
Field.uniqify_names(self.fields)
# We still need to create arrays, even if the file
# contains no DATA section
self.create_arrays(nrows=0, config=config)
return self
self.create_arrays(nrows=self._nrows, config=config)
fields = self.fields
names = [x.ID for x in fields]
# Deal with a subset of the columns, if requested.
if not columns:
colnumbers = list(range(len(fields)))
else:
if isinstance(columns, str):
columns = [columns]
columns = np.asarray(columns)
if issubclass(columns.dtype.type, np.integer):
                if np.any(columns < 0) or np.any(columns >= len(fields)):
raise ValueError(
"Some specified column numbers out of range")
colnumbers = columns
elif issubclass(columns.dtype.type, np.character):
try:
colnumbers = [names.index(x) for x in columns]
except ValueError:
raise ValueError(
"Columns '{}' not found in fields list".format(columns))
else:
raise TypeError("Invalid columns list")
if not skip_table:
for start, tag, data, pos in iterator:
if start:
if tag == 'TABLEDATA':
warn_unknown_attrs(
'TABLEDATA', data.keys(), config, pos)
self.array = self._parse_tabledata(
iterator, colnumbers, fields, config)
break
elif tag == 'BINARY':
warn_unknown_attrs(
'BINARY', data.keys(), config, pos)
self.array = self._parse_binary(
1, iterator, colnumbers, fields, config, pos)
break
elif tag == 'BINARY2':
if not config['version_1_3_or_later']:
warn_or_raise(
W52, W52, config['version'], config, pos)
self.array = self._parse_binary(
2, iterator, colnumbers, fields, config, pos)
break
elif tag == 'FITS':
warn_unknown_attrs(
'FITS', data.keys(), config, pos, ['extnum'])
try:
extnum = int(data.get('extnum', 0))
if extnum < 0:
raise ValueError("'extnum' cannot be negative.")
except ValueError:
vo_raise(E17, (), config, pos)
self.array = self._parse_fits(
iterator, extnum, config)
break
else:
warn_or_raise(W37, W37, tag, config, pos)
break
for start, tag, data, pos in iterator:
if not start and tag == 'DATA':
break
for start, tag, data, pos in iterator:
if start and tag == 'INFO':
if not config.get('version_1_2_or_later'):
warn_or_raise(
W26, W26, ('INFO', 'TABLE', '1.2'), config, pos)
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
elif not start and tag == 'TABLE':
break
return self
def _parse_tabledata(self, iterator, colnumbers, fields, config):
# Since we don't know the number of rows up front, we'll
# reallocate the record array to make room as we go. This
# prevents the need to scan through the XML twice. The
# allocation is by factors of 1.5.
invalid = config.get('invalid', 'exception')
# Need to have only one reference so that we can resize the
# array
array = self.array
del self.array
parsers = [field.converter.parse for field in fields]
binparsers = [field.converter.binparse for field in fields]
numrows = 0
alloc_rows = len(array)
colnumbers_bits = [i in colnumbers for i in range(len(fields))]
row_default = [x.converter.default for x in fields]
mask_default = [True] * len(fields)
array_chunk = []
mask_chunk = []
chunk_size = config.get('chunk_size', DEFAULT_CHUNK_SIZE)
for start, tag, data, pos in iterator:
if tag == 'TR':
# Now parse one row
row = row_default[:]
row_mask = mask_default[:]
i = 0
for start, tag, data, pos in iterator:
if start:
binary = (data.get('encoding', None) == 'base64')
warn_unknown_attrs(
tag, data.keys(), config, pos, ['encoding'])
else:
if tag == 'TD':
if i >= len(fields):
vo_raise(E20, len(fields), config, pos)
if colnumbers_bits[i]:
try:
if binary:
rawdata = base64.b64decode(
data.encode('ascii'))
buf = io.BytesIO(rawdata)
buf.seek(0)
try:
value, mask_value = binparsers[i](
buf.read)
except Exception as e:
vo_reraise(
e, config, pos,
"(in row {:d}, col '{}')".format(
len(array_chunk),
fields[i].ID))
else:
try:
value, mask_value = parsers[i](
data, config, pos)
except Exception as e:
vo_reraise(
e, config, pos,
"(in row {:d}, col '{}')".format(
len(array_chunk),
fields[i].ID))
except Exception as e:
if invalid == 'exception':
vo_reraise(e, config, pos)
else:
row[i] = value
row_mask[i] = mask_value
elif tag == 'TR':
break
else:
self._add_unknown_tag(
iterator, tag, data, config, pos)
i += 1
if i < len(fields):
vo_raise(E21, (i, len(fields)), config, pos)
array_chunk.append(tuple(row))
mask_chunk.append(tuple(row_mask))
if len(array_chunk) == chunk_size:
while numrows + chunk_size > alloc_rows:
alloc_rows = self._resize_strategy(alloc_rows)
if alloc_rows != len(array):
array = _resize(array, alloc_rows)
array[numrows:numrows + chunk_size] = array_chunk
array.mask[numrows:numrows + chunk_size] = mask_chunk
numrows += chunk_size
array_chunk = []
mask_chunk = []
elif not start and tag == 'TABLEDATA':
break
# Now, resize the array to the exact number of rows we need and
# put the last chunk values in there.
alloc_rows = numrows + len(array_chunk)
array = _resize(array, alloc_rows)
array[numrows:] = array_chunk
if alloc_rows != 0:
array.mask[numrows:] = mask_chunk
numrows += len(array_chunk)
if (self.nrows is not None and
self.nrows >= 0 and
self.nrows != numrows):
warn_or_raise(W18, W18, (self.nrows, numrows), config, pos)
self._nrows = numrows
return array
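    # For reference, the TABLEDATA serialization parsed above looks like
    # this (illustrative):
    #     <TABLEDATA>
    #       <TR><TD>test1.xml</TD><TD>1 0 0 1</TD></TR>
    #       <TR><TD>test2.xml</TD><TD>0.5 0.3 0.2 0.1</TD></TR>
    #     </TABLEDATA>
    # Each TR is one row; TDs are matched positionally to the FIELDs.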
def _get_binary_data_stream(self, iterator, config):
have_local_stream = False
for start, tag, data, pos in iterator:
if tag == 'STREAM':
if start:
warn_unknown_attrs(
'STREAM', data.keys(), config, pos,
['type', 'href', 'actuate', 'encoding', 'expires',
'rights'])
if 'href' not in data:
have_local_stream = True
if data.get('encoding', None) != 'base64':
warn_or_raise(
W38, W38, data.get('encoding', None),
config, pos)
else:
href = data['href']
xmlutil.check_anyuri(href, config, pos)
encoding = data.get('encoding', None)
else:
buffer = data
break
if have_local_stream:
buffer = base64.b64decode(buffer.encode('ascii'))
string_io = io.BytesIO(buffer)
string_io.seek(0)
read = string_io.read
else:
if not href.startswith(('http', 'ftp', 'file')):
vo_raise(
"The vo package only supports remote data through http, " +
"ftp or file",
self._config, self._pos, NotImplementedError)
fd = urllib.request.urlopen(href)
if encoding is not None:
if encoding == 'gzip':
fd = gzip.GzipFile(href, 'rb', fileobj=fd)
elif encoding == 'base64':
fd = codecs.EncodedFile(fd, 'base64')
else:
vo_raise(
"Unknown encoding type '{}'".format(encoding),
self._config, self._pos, NotImplementedError)
read = fd.read
def careful_read(length):
result = read(length)
if len(result) != length:
raise EOFError
return result
return careful_read
def _parse_binary(self, mode, iterator, colnumbers, fields, config, pos):
fields = self.fields
careful_read = self._get_binary_data_stream(iterator, config)
# Need to have only one reference so that we can resize the
# array
array = self.array
del self.array
binparsers = [field.converter.binparse for field in fields]
numrows = 0
alloc_rows = len(array)
while True:
# Resize result arrays if necessary
if numrows >= alloc_rows:
alloc_rows = self._resize_strategy(alloc_rows)
array = _resize(array, alloc_rows)
row_data = []
row_mask_data = []
try:
if mode == 2:
mask_bits = careful_read(int((len(fields) + 7) / 8))
row_mask_data = list(converters.bitarray_to_bool(
mask_bits, len(fields)))
for i, binparse in enumerate(binparsers):
try:
value, value_mask = binparse(careful_read)
except EOFError:
raise
except Exception as e:
vo_reraise(
e, config, pos, "(in row {:d}, col '{}')".format(
numrows, fields[i].ID))
row_data.append(value)
if mode == 1:
row_mask_data.append(value_mask)
else:
row_mask_data[i] = row_mask_data[i] or value_mask
except EOFError:
break
row = [x.converter.default for x in fields]
row_mask = [False] * len(fields)
for i in colnumbers:
row[i] = row_data[i]
row_mask[i] = row_mask_data[i]
array[numrows] = tuple(row)
array.mask[numrows] = tuple(row_mask)
numrows += 1
array = _resize(array, numrows)
return array
def _parse_fits(self, iterator, extnum, config):
for start, tag, data, pos in iterator:
if tag == 'STREAM':
if start:
warn_unknown_attrs(
'STREAM', data.keys(), config, pos,
['type', 'href', 'actuate', 'encoding', 'expires',
'rights'])
href = data['href']
encoding = data.get('encoding', None)
else:
break
if not href.startswith(('http', 'ftp', 'file')):
vo_raise(
"The vo package only supports remote data through http, "
"ftp or file",
self._config, self._pos, NotImplementedError)
fd = urllib.request.urlopen(href)
if encoding is not None:
if encoding == 'gzip':
fd = gzip.GzipFile(href, 'r', fileobj=fd)
elif encoding == 'base64':
fd = codecs.EncodedFile(fd, 'base64')
else:
vo_raise(
"Unknown encoding type '{}'".format(encoding),
self._config, self._pos, NotImplementedError)
hdulist = fits.open(fd)
array = hdulist[int(extnum)].data
if array.dtype != self.array.dtype:
warn_or_raise(W19, W19, (), self._config, self._pos)
return array
def to_xml(self, w, **kwargs):
specified_format = kwargs.get('tabledata_format')
if specified_format is not None:
format = specified_format
else:
format = self.format
if format == 'fits':
format = 'tabledata'
with w.tag(
'TABLE',
attrib=w.object_attrs(
self,
('ID', 'name', 'ref', 'ucd', 'utype', 'nrows'))):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for element_set in (self.fields, self.params):
for element in element_set:
element._setup({}, None)
if self.ref is None:
for element_set in (self.fields, self.params, self.groups,
self.links):
for element in element_set:
element.to_xml(w, **kwargs)
elif kwargs['version_1_2_or_later']:
index = list(self._votable.iter_tables()).index(self)
group = Group(self, ID="_g{0}".format(index))
group.to_xml(w, **kwargs)
if len(self.array):
with w.tag('DATA'):
if format == 'tabledata':
self._write_tabledata(w, **kwargs)
elif format == 'binary':
self._write_binary(1, w, **kwargs)
elif format == 'binary2':
self._write_binary(2, w, **kwargs)
if kwargs['version_1_2_or_later']:
for element in self._infos:
element.to_xml(w, **kwargs)
def _write_tabledata(self, w, **kwargs):
fields = self.fields
array = self.array
with w.tag('TABLEDATA'):
w._flush()
if (_has_c_tabledata_writer and
not kwargs.get('_debug_python_based_parser')):
supports_empty_values = [
field.converter.supports_empty_values(kwargs)
for field in fields]
fields = [field.converter.output for field in fields]
indent = len(w._tags) - 1
tablewriter.write_tabledata(
w.write, array.data, array.mask, fields,
supports_empty_values, indent, 1 << 8)
else:
write = w.write
indent_spaces = w.get_indentation_spaces()
tr_start = indent_spaces + "<TR>\n"
tr_end = indent_spaces + "</TR>\n"
td = indent_spaces + " <TD>{}</TD>\n"
td_empty = indent_spaces + " <TD/>\n"
fields = [(i, field.converter.output,
field.converter.supports_empty_values(kwargs))
for i, field in enumerate(fields)]
for row in range(len(array)):
write(tr_start)
array_row = array.data[row]
mask_row = array.mask[row]
for i, output, supports_empty_values in fields:
data = array_row[i]
masked = mask_row[i]
if supports_empty_values and np.all(masked):
write(td_empty)
else:
try:
val = output(data, masked)
except Exception as e:
vo_reraise(
e,
additional="(in row {:d}, col '{}')".format(
row, self.fields[i].ID))
if len(val):
write(td.format(val))
else:
write(td_empty)
write(tr_end)
def _write_binary(self, mode, w, **kwargs):
fields = self.fields
array = self.array
if mode == 1:
tag_name = 'BINARY'
else:
tag_name = 'BINARY2'
with w.tag(tag_name):
with w.tag('STREAM', encoding='base64'):
fields_basic = [(i, field.converter.binoutput)
for (i, field) in enumerate(fields)]
data = io.BytesIO()
for row in range(len(array)):
array_row = array.data[row]
array_mask = array.mask[row]
if mode == 2:
flattened = np.array([np.all(x) for x in array_mask])
data.write(converters.bool_to_bitarray(flattened))
for i, converter in fields_basic:
try:
chunk = converter(array_row[i], array_mask[i])
                            assert isinstance(chunk, bytes)
except Exception as e:
vo_reraise(
e, additional="(in row {:d}, col '{}')".format(
row, fields[i].ID))
data.write(chunk)
w._flush()
w.write(base64.b64encode(data.getvalue()).decode('ascii'))
def to_table(self, use_names_over_ids=False):
"""
Convert this VO Table to an `astropy.table.Table` instance.
Parameters
----------
use_names_over_ids : bool, optional
When `True` use the ``name`` attributes of columns as the
names of columns in the `astropy.table.Table` instance.
Since names are not guaranteed to be unique, this may cause
some columns to be renamed by appending numbers to the end.
Otherwise (default), use the ID attributes as the column
names.
.. warning::
Variable-length array fields may not be restored
identically when round-tripping through the
`astropy.table.Table` instance.
"""
from ...table import Table
meta = {}
for key in ['ID', 'name', 'ref', 'ucd', 'utype', 'description']:
val = getattr(self, key, None)
if val is not None:
meta[key] = val
if use_names_over_ids:
names = [field.name for field in self.fields]
unique_names = []
            for name in names:
new_name = name
i = 2
while new_name in unique_names:
new_name = '{0}{1}'.format(name, i)
i += 1
unique_names.append(new_name)
names = unique_names
else:
names = [field.ID for field in self.fields]
table = Table(self.array, names=names, meta=meta)
for name, field in zip(names, self.fields):
column = table[name]
field.to_table_column(column)
return table
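    # Example (illustrative sketch, assuming the module-level
    # ``astropy.io.votable.parse`` function):
    #     votable = parse('votable.xml')
    #     t = votable.get_first_table().to_table(use_names_over_ids=True)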
@classmethod
def from_table(cls, votable, table):
"""
Create a `Table` instance from a given `astropy.table.Table`
instance.
"""
kwargs = {}
for key in ['ID', 'name', 'ref', 'ucd', 'utype']:
val = table.meta.get(key)
if val is not None:
kwargs[key] = val
new_table = cls(votable, **kwargs)
if 'description' in table.meta:
new_table.description = table.meta['description']
for colname in table.colnames:
column = table[colname]
new_table.fields.append(Field.from_table_column(votable, column))
if table.mask is None:
new_table.array = ma.array(np.asarray(table))
else:
new_table.array = ma.array(np.asarray(table),
mask=np.asarray(table.mask))
return new_table
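    # Example (illustrative sketch): wrapping an existing
    # ``astropy.table.Table`` instance.  The module-level
    # ``astropy.io.votable.from_table`` convenience function performs
    # essentially these steps.
    #     vot = VOTableFile()
    #     resource = Resource()
    #     vot.resources.append(resource)
    #     resource.tables.append(Table.from_table(vot, astropy_table))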
def iter_fields_and_params(self):
"""
Recursively iterate over all FIELD and PARAM elements in the
TABLE.
"""
for param in self.params:
yield param
for field in self.fields:
yield field
for group in self.groups:
for field in group.iter_fields_and_params():
yield field
get_field_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_fields_and_params', 'FIELD or PARAM',
"""
Looks up a FIELD or PARAM element by the given ID.
""")
get_field_by_id_or_name = _lookup_by_id_or_name_factory(
'iter_fields_and_params', 'FIELD or PARAM',
"""
Looks up a FIELD or PARAM element by the given ID or name.
""")
get_fields_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_fields_and_params', 'FIELD or PARAM',
"""
Looks up a FIELD or PARAM element by the given utype and
returns an iterator emitting all matches.
""")
def iter_groups(self):
"""
Recursively iterate over all GROUP elements in the TABLE.
"""
for group in self.groups:
yield group
for g in group.iter_groups():
yield g
get_group_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_groups', 'GROUP',
"""
Looks up a GROUP element by the given ID. Used by the group's
"ref" attribute
""")
get_groups_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_groups', 'GROUP',
"""
Looks up a GROUP element by the given utype and returns an
iterator emitting all matches.
""")
def iter_info(self):
for info in self.infos:
yield info
class Resource(Element, _IDProperty, _NameProperty, _UtypeProperty,
_DescriptionProperty):
"""
RESOURCE_ element: Groups TABLE_ and RESOURCE_ elements.
The keyword arguments correspond to setting members of the same
name, documented below.
"""
def __init__(self, name=None, ID=None, utype=None, type='results',
id=None, config=None, pos=None, **kwargs):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self.name = name
self.ID = resolve_id(ID, id, config, pos)
self.utype = utype
self.type = type
self._extra_attributes = kwargs
self.description = None
self._coordinate_systems = HomogeneousList(CooSys)
self._groups = HomogeneousList(Group)
self._params = HomogeneousList(Param)
self._infos = HomogeneousList(Info)
self._links = HomogeneousList(Link)
self._tables = HomogeneousList(Table)
self._resources = HomogeneousList(Resource)
warn_unknown_attrs('RESOURCE', kwargs.keys(), config, pos)
def __repr__(self):
buff = io.StringIO()
w = XMLWriter(buff)
w.element(
self._element_name,
attrib=w.object_attrs(self, self._attr_list))
return buff.getvalue().strip()
@property
def type(self):
"""
[*required*] The type of the resource. Must be either:
- 'results': This resource contains actual result values
(default)
- 'meta': This resource contains only datatype descriptions
(FIELD_ elements), but no actual data.
"""
return self._type
@type.setter
def type(self, type):
if type not in ('results', 'meta'):
vo_raise(E18, type, self._config, self._pos)
self._type = type
@property
def extra_attributes(self):
"""
A dictionary of string keys to string values containing any
extra attributes of the RESOURCE_ element that are not defined
in the specification. (The specification explicitly allows
for extra attributes here, but nowhere else.)
"""
return self._extra_attributes
@property
def coordinate_systems(self):
"""
A list of coordinate system definitions (COOSYS_ elements) for
the RESOURCE_. Must contain only `CooSys` objects.
"""
return self._coordinate_systems
@property
def infos(self):
"""
A list of informational parameters (key-value pairs) for the
resource. Must only contain `Info` objects.
"""
return self._infos
@property
def groups(self):
"""
        A list of groups (GROUP_ elements). Must contain only
        `Group` objects.
"""
return self._groups
@property
def params(self):
"""
A list of parameters (constant-valued columns) for the
resource. Must contain only `Param` objects.
"""
return self._params
@property
def links(self):
"""
A list of links (pointers to other documents or servers
through a URI) for the resource. Must contain only `Link`
objects.
"""
return self._links
@property
def tables(self):
"""
A list of tables in the resource. Must contain only
`Table` objects.
"""
return self._tables
@property
def resources(self):
"""
A list of nested resources inside this resource. Must contain
only `Resource` objects.
"""
return self._resources
def _add_table(self, iterator, tag, data, config, pos):
table = Table(self._votable, config=config, pos=pos, **data)
self.tables.append(table)
table.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self._votable, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_coosys(self, iterator, tag, data, config, pos):
coosys = CooSys(config=config, pos=pos, **data)
self.coordinate_systems.append(coosys)
coosys.parse(iterator, config)
def _add_resource(self, iterator, tag, data, config, pos):
resource = Resource(config=config, pos=pos, **data)
self.resources.append(resource)
resource.parse(self._votable, iterator, config)
def _add_link(self, iterator, tag, data, config, pos):
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
def parse(self, votable, iterator, config):
self._votable = votable
tag_mapping = {
'TABLE': self._add_table,
'INFO': self._add_info,
'PARAM': self._add_param,
            'GROUP': self._add_group,
'COOSYS': self._add_coosys,
'RESOURCE': self._add_resource,
'LINK': self._add_link,
'DESCRIPTION': self._ignore_add
}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
elif tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(W17, W17, 'RESOURCE', config, pos)
self.description = data or None
elif tag == 'RESOURCE':
break
del self._votable
return self
def to_xml(self, w, **kwargs):
attrs = w.object_attrs(self, ('ID', 'type', 'utype'))
attrs.update(self.extra_attributes)
with w.tag('RESOURCE', attrib=attrs):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
for element_set in (self.coordinate_systems, self.params,
self.infos, self.links, self.tables,
self.resources):
for element in element_set:
element.to_xml(w, **kwargs)
def iter_tables(self):
"""
Recursively iterates over all tables in the resource and
nested resources.
"""
for table in self.tables:
yield table
for resource in self.resources:
for table in resource.iter_tables():
yield table
def iter_fields_and_params(self):
"""
Recursively iterates over all FIELD_ and PARAM_ elements in
the resource, its tables and nested resources.
"""
for param in self.params:
yield param
for table in self.tables:
for param in table.iter_fields_and_params():
yield param
for resource in self.resources:
for param in resource.iter_fields_and_params():
yield param
def iter_coosys(self):
"""
Recursively iterates over all the COOSYS_ elements in the
resource and nested resources.
"""
for coosys in self.coordinate_systems:
yield coosys
for resource in self.resources:
for coosys in resource.iter_coosys():
yield coosys
def iter_info(self):
"""
Recursively iterates over all the INFO_ elements in the
resource and nested resources.
"""
for info in self.infos:
yield info
for table in self.tables:
for info in table.iter_info():
yield info
for resource in self.resources:
for info in resource.iter_info():
yield info
class VOTableFile(Element, _IDProperty, _DescriptionProperty):
"""
VOTABLE_ element: represents an entire file.
The keyword arguments correspond to setting members of the same
name, documented below.
*version* is settable at construction time only, since conformance
tests for building the rest of the structure depend on it.
"""
def __init__(self, ID=None, id=None, config=None, pos=None, version="1.3"):
if config is None:
config = {}
self._config = config
self._pos = pos
Element.__init__(self)
self.ID = resolve_id(ID, id, config, pos)
self.description = None
self._coordinate_systems = HomogeneousList(CooSys)
self._params = HomogeneousList(Param)
self._infos = HomogeneousList(Info)
self._resources = HomogeneousList(Resource)
self._groups = HomogeneousList(Group)
version = str(version)
if version not in ("1.0", "1.1", "1.2", "1.3"):
raise ValueError("'version' should be one of '1.0', '1.1', "
"'1.2', or '1.3'")
self._version = version
def __repr__(self):
n_tables = len(list(self.iter_tables()))
return '<VOTABLE>... {0} tables ...</VOTABLE>'.format(n_tables)
@property
def version(self):
"""
The version of the VOTable specification that the file uses.
"""
return self._version
@version.setter
def version(self, version):
version = str(version)
if version not in ('1.1', '1.2', '1.3'):
raise ValueError(
"astropy.io.votable only supports VOTable versions "
"1.1, 1.2 and 1.3")
self._version = version
@property
def coordinate_systems(self):
"""
A list of coordinate system descriptions for the file. Must
contain only `CooSys` objects.
"""
return self._coordinate_systems
@property
def params(self):
"""
A list of parameters (constant-valued columns) that apply to
the entire file. Must contain only `Param` objects.
"""
return self._params
@property
def infos(self):
"""
A list of informational parameters (key-value pairs) for the
entire file. Must only contain `Info` objects.
"""
return self._infos
@property
def resources(self):
"""
A list of resources, in the order they appear in the file.
Must only contain `Resource` objects.
"""
return self._resources
@property
def groups(self):
"""
A list of groups, in the order they appear in the file. Only
supported as a child of the VOTABLE element in VOTable 1.2 or
later.
"""
return self._groups
def _add_param(self, iterator, tag, data, config, pos):
param = Param(self, config=config, pos=pos, **data)
self.params.append(param)
param.parse(iterator, config)
def _add_resource(self, iterator, tag, data, config, pos):
resource = Resource(config=config, pos=pos, **data)
self.resources.append(resource)
resource.parse(self, iterator, config)
def _add_coosys(self, iterator, tag, data, config, pos):
coosys = CooSys(config=config, pos=pos, **data)
self.coordinate_systems.append(coosys)
coosys.parse(iterator, config)
def _add_info(self, iterator, tag, data, config, pos):
info = Info(config=config, pos=pos, **data)
self.infos.append(info)
info.parse(iterator, config)
def _add_group(self, iterator, tag, data, config, pos):
if not config.get('version_1_2_or_later'):
warn_or_raise(W26, W26, ('GROUP', 'VOTABLE', '1.2'), config, pos)
group = Group(self, config=config, pos=pos, **data)
self.groups.append(group)
group.parse(iterator, config)
def parse(self, iterator, config):
config['_current_table_number'] = 0
for start, tag, data, pos in iterator:
if start:
if tag == 'xml':
pass
elif tag == 'VOTABLE':
if 'version' not in data:
warn_or_raise(W20, W20, self.version, config, pos)
config['version'] = self.version
else:
config['version'] = self._version = data['version']
if config['version'].lower().startswith('v'):
warn_or_raise(
W29, W29, config['version'], config, pos)
self._version = config['version'] = \
config['version'][1:]
if config['version'] not in ('1.1', '1.2', '1.3'):
vo_warn(W21, config['version'], config, pos)
if 'xmlns' in data:
correct_ns = ('http://www.ivoa.net/xml/VOTable/v{}'.format(
config['version']))
if data['xmlns'] != correct_ns:
vo_warn(
W41, (correct_ns, data['xmlns']), config, pos)
else:
vo_warn(W42, (), config, pos)
break
else:
vo_raise(E19, (), config, pos)
config['version_1_1_or_later'] = \
util.version_compare(config['version'], '1.1') >= 0
config['version_1_2_or_later'] = \
util.version_compare(config['version'], '1.2') >= 0
config['version_1_3_or_later'] = \
util.version_compare(config['version'], '1.3') >= 0
tag_mapping = {
'PARAM': self._add_param,
'RESOURCE': self._add_resource,
'COOSYS': self._add_coosys,
'INFO': self._add_info,
'DEFINITIONS': self._add_definitions,
'DESCRIPTION': self._ignore_add,
'GROUP': self._add_group}
for start, tag, data, pos in iterator:
if start:
tag_mapping.get(tag, self._add_unknown_tag)(
iterator, tag, data, config, pos)
elif tag == 'DESCRIPTION':
if self.description is not None:
warn_or_raise(W17, W17, 'VOTABLE', config, pos)
self.description = data or None
if not len(self.resources) and config['version_1_2_or_later']:
warn_or_raise(W53, W53, (), config, pos)
return self
def to_xml(self, fd, compressed=False, tabledata_format=None,
_debug_python_based_parser=False, _astropy_version=None):
"""
Write to an XML file.
Parameters
----------
fd : str path or writable file-like object
Where to write the file.
compressed : bool, optional
When `True`, write to a gzip-compressed file. (Default:
`False`)
tabledata_format : str, optional
Override the format of the table(s) data to write. Must
be one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified
in each `Table` object as it was created or read in. See
:ref:`votable-serialization`.
"""
if tabledata_format is not None:
if tabledata_format.lower() not in (
'tabledata', 'binary', 'binary2'):
raise ValueError("Unknown format type '{0}'".format(format))
kwargs = {
'version': self.version,
'version_1_1_or_later':
util.version_compare(self.version, '1.1') >= 0,
'version_1_2_or_later':
util.version_compare(self.version, '1.2') >= 0,
'version_1_3_or_later':
util.version_compare(self.version, '1.3') >= 0,
'tabledata_format':
tabledata_format,
'_debug_python_based_parser': _debug_python_based_parser,
'_group_number': 1}
with util.convert_to_writable_filelike(
fd, compressed=compressed) as fd:
w = XMLWriter(fd)
version = self.version
if _astropy_version is None:
lib_version = astropy_version
else:
lib_version = _astropy_version
xml_header = """
<?xml version="1.0" encoding="utf-8"?>
<!-- Produced with astropy.io.votable version {lib_version}
http://www.astropy.org/ -->\n"""
w.write(xml_header.lstrip().format(**locals()))
with w.tag('VOTABLE',
{'version': version,
'xmlns:xsi':
"http://www.w3.org/2001/XMLSchema-instance",
'xsi:noNamespaceSchemaLocation':
"http://www.ivoa.net/xml/VOTable/v{}".format(version),
'xmlns':
"http://www.ivoa.net/xml/VOTable/v{}".format(version)}):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
element_sets = [self.coordinate_systems, self.params,
self.infos, self.resources]
if kwargs['version_1_2_or_later']:
element_sets[0] = self.groups
for element_set in element_sets:
for element in element_set:
element.to_xml(w, **kwargs)
def iter_tables(self):
"""
Iterates over all tables in the VOTable file in a "flat" way,
ignoring the nesting of resources etc.
"""
for resource in self.resources:
for table in resource.iter_tables():
yield table
def get_first_table(self):
"""
Often, you know there is only one table in the file, and
that's all you need. This method returns that first table.
"""
for table in self.iter_tables():
if not table.is_empty():
return table
raise IndexError("No table found in VOTABLE file.")
get_table_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_tables', 'TABLE',
"""
Looks up a TABLE_ element by the given ID. Used by the table
"ref" attribute.
""")
get_tables_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_tables', 'TABLE',
"""
Looks up a TABLE_ element by the given utype, and returns an
iterator emitting all matches.
""")
def get_table_by_index(self, idx):
"""
Get a table by its ordinal position in the file.
"""
for i, table in enumerate(self.iter_tables()):
if i == idx:
return table
raise IndexError(
"No table at index {:d} found in VOTABLE file.".format(idx))
def iter_fields_and_params(self):
"""
Recursively iterate over all FIELD_ and PARAM_ elements in the
VOTABLE_ file.
"""
for resource in self.resources:
for field in resource.iter_fields_and_params():
yield field
get_field_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_fields_and_params', 'FIELD',
"""
Looks up a FIELD_ element by the given ID_. Used by the field's
"ref" attribute.
""")
get_fields_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_fields_and_params', 'FIELD',
"""
Looks up a FIELD_ element by the given utype and returns an
iterator emitting all matches.
""")
get_field_by_id_or_name = _lookup_by_id_or_name_factory(
'iter_fields_and_params', 'FIELD',
"""
Looks up a FIELD_ element by the given ID_ or name.
""")
def iter_values(self):
"""
Recursively iterate over all VALUES_ elements in the VOTABLE_
file.
"""
for field in self.iter_fields_and_params():
yield field.values
get_values_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_values', 'VALUES',
"""
Looks up a VALUES_ element by the given ID. Used by the values
"ref" attribute.
""")
def iter_groups(self):
"""
Recursively iterate over all GROUP_ elements in the VOTABLE_
file.
"""
for table in self.iter_tables():
for group in table.iter_groups():
yield group
get_group_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_groups', 'GROUP',
"""
Looks up a GROUP_ element by the given ID. Used by the group's
"ref" attribute
""")
get_groups_by_utype = _lookup_by_attr_factory(
'utype', False, 'iter_groups', 'GROUP',
"""
Looks up a GROUP_ element by the given utype and returns an
iterator emitting all matches.
""")
def iter_coosys(self):
"""
Recursively iterate over all COOSYS_ elements in the VOTABLE_
file.
"""
for coosys in self.coordinate_systems:
yield coosys
for resource in self.resources:
for coosys in resource.iter_coosys():
yield coosys
get_coosys_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_coosys', 'COOSYS',
"""Looks up a COOSYS_ element by the given ID.""")
def iter_info(self):
"""
Recursively iterate over all INFO_ elements in the VOTABLE_
file.
"""
for info in self.infos:
yield info
for resource in self.resources:
for info in resource.iter_info():
yield info
get_info_by_id = _lookup_by_attr_factory(
'ID', True, 'iter_info', 'INFO',
"""Looks up a INFO element by the given ID.""")
def set_all_tables_format(self, format):
"""
Set the output storage format of all tables in the file.
"""
for table in self.iter_tables():
table.format = format
@classmethod
def from_table(cls, table, table_id=None):
"""
Create a `VOTableFile` instance from a given
`astropy.table.Table` instance.
Parameters
----------
table_id : str, optional
Set the given ID attribute on the returned Table instance.
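
        Examples
        --------
        A minimal sketch, assuming ``t`` is an existing
        `astropy.table.Table` (the ID string is illustrative)::

            votable_file = VOTableFile.from_table(t, table_id='my_table')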
"""
votable_file = cls()
resource = Resource()
votable = Table.from_table(votable_file, table)
if table_id is not None:
votable.ID = table_id
resource.tables.append(votable)
votable_file.resources.append(resource)
return votable_file
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains the high-level functions to read a
VOTable file.
"""
# STDLIB
import io
import os
import sys
import textwrap
import warnings
# LOCAL
from . import exceptions
from . import tree
from ...utils.xml import iterparser
from ...utils import data
__all__ = ['parse', 'parse_single_table', 'from_table', 'writeto', 'validate',
'reset_vo_warnings']
def parse(source, columns=None, invalid='exception', pedantic=None,
chunk_size=tree.DEFAULT_CHUNK_SIZE, table_number=None,
table_id=None, filename=None, unit_format=None,
datatype_mapping=None, _debug_python_based_parser=False):
"""
Parses a VOTABLE_ xml file (or file-like object), and returns a
`~astropy.io.votable.tree.VOTableFile` object.
Parameters
----------
source : str or readable file-like object
Path or file object containing a VOTABLE_ xml file.
columns : sequence of str, optional
List of field names to include in the output. The default is
to include all fields.
invalid : str, optional
One of the following values:
- 'exception': throw an exception when an invalid value is
encountered (default)
- 'mask': mask out invalid values
pedantic : bool, optional
When `True`, raise an error when the file violates the spec,
otherwise issue a warning. Warnings may be controlled using
the standard Python mechanisms. See the `warnings`
module in the Python standard library for more information.
When not provided, uses the configuration setting
``astropy.io.votable.pedantic``, which defaults to False.
chunk_size : int, optional
The number of rows to read before converting to an array.
Higher numbers are likely to be faster, but will consume more
memory.
table_number : int, optional
The number of table in the file to read in. If `None`, all
tables will be read. If a number, 0 refers to the first table
in the file, and only that numbered table will be parsed and
read in. Should not be used with ``table_id``.
table_id : str, optional
The ID of the table in the file to read in. Should not be
used with ``table_number``.
filename : str, optional
A filename, URL or other identifier to use in error messages.
If *filename* is None and *source* is a string (i.e. a path),
then *source* will be used as a filename for error messages.
Therefore, *filename* is only required when source is a
file-like object.
unit_format : str, astropy.units.format.Base instance or None, optional
The unit format to use when parsing unit attributes. If a
string, must be the name of a unit formatter. The built-in
formats include ``generic``, ``fits``, ``cds``, and
``vounit``. A custom formatter may be provided by passing a
        `~astropy.units.format.Base` instance. If `None` (default),
the unit format to use will be the one specified by the
VOTable specification (which is ``cds`` up to version 1.2 of
VOTable, and (probably) ``vounit`` in future versions of the
spec).
datatype_mapping : dict of str to str, optional
A mapping of datatype names to valid VOTable datatype names.
For example, if the file being read contains the datatype
"unsignedInt" (an invalid datatype in VOTable), include the
mapping ``{"unsignedInt": "long"}``.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` object
See also
--------
astropy.io.votable.exceptions : The exceptions this function may raise.
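
    Examples
    --------
    A minimal sketch (``'data.xml'`` is a hypothetical filename)::

        from astropy.io.votable import parse
        votable = parse('data.xml')
        for table in votable.iter_tables():
            print(table.ID)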
"""
from . import conf
invalid = invalid.lower()
if invalid not in ('exception', 'mask'):
raise ValueError("accepted values of ``invalid`` are: "
"``'exception'`` or ``'mask'``.")
if pedantic is None:
pedantic = conf.pedantic
if datatype_mapping is None:
datatype_mapping = {}
config = {
'columns': columns,
'invalid': invalid,
'pedantic': pedantic,
'chunk_size': chunk_size,
'table_number': table_number,
'filename': filename,
'unit_format': unit_format,
'datatype_mapping': datatype_mapping
}
if filename is None and isinstance(source, str):
config['filename'] = source
with iterparser.get_xml_iterator(
source,
_debug_python_based_parser=_debug_python_based_parser) as iterator:
return tree.VOTableFile(
config=config, pos=(1, 1)).parse(iterator, config)
def parse_single_table(source, **kwargs):
"""
Parses a VOTABLE_ xml file (or file-like object), reading and
returning only the first `~astropy.io.votable.tree.Table`
instance.
See `parse` for a description of the keyword arguments.
Returns
-------
votable : `~astropy.io.votable.tree.Table` object
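
    Examples
    --------
    A minimal sketch (``'data.xml'`` is a hypothetical filename)::

        table = parse_single_table('data.xml')
        data = table.array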
"""
if kwargs.get('table_number') is None:
kwargs['table_number'] = 0
votable = parse(source, **kwargs)
return votable.get_first_table()
def writeto(table, file, tabledata_format=None):
"""
Writes a `~astropy.io.votable.tree.VOTableFile` to a VOTABLE_ xml file.
Parameters
----------
table : `~astropy.io.votable.tree.VOTableFile` or `~astropy.table.Table` instance.
file : str or writable file-like object
Path or file object to write to
tabledata_format : str, optional
Override the format of the table(s) data to write. Must be
one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified in
each ``table`` object as it was created or read in. See
:ref:`votable-serialization`.
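
    Examples
    --------
    A minimal sketch, assuming ``votable`` is a
    `~astropy.io.votable.tree.VOTableFile` (``'output.xml'`` is a
    hypothetical filename)::

        writeto(votable, 'output.xml', tabledata_format='binary')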
"""
from ...table import Table
if isinstance(table, Table):
table = tree.VOTableFile.from_table(table)
elif not isinstance(table, tree.VOTableFile):
raise TypeError(
"first argument must be astropy.io.vo.VOTableFile or "
"astropy.table.Table instance")
table.to_xml(file, tabledata_format=tabledata_format,
_debug_python_based_parser=True)
def validate(source, output=None, xmllint=False, filename=None):
"""
Prints a validation report for the given file.
Parameters
----------
source : str or readable file-like object
        Path to a VOTABLE_ xml file or a `pathlib.Path` object
        pointing to a VOTABLE_ xml file.
    output : writable file-like object, optional
        Where to output the report. If `None` (the default), the
        report is returned as a string.
xmllint : bool, optional
When `True`, also send the file to ``xmllint`` for schema and
DTD validation. Requires that ``xmllint`` is installed. The
default is `False`. ``source`` must be a file on the local
filesystem in order for ``xmllint`` to work.
filename : str, optional
A filename to use in the error messages. If not provided, one
will be automatically determined from ``source``.
Returns
-------
is_valid : bool or str
Returns `True` if no warnings were found. If ``output`` is
`None`, the return value will be a string.
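
    Examples
    --------
    A minimal sketch (``'data.xml'`` is a hypothetical filename)::

        report = validate('data.xml')          # report as a string
        with open('report.txt', 'w') as fd:
            ok = validate('data.xml', output=fd)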
"""
from ...utils.console import print_code_line, color_print
    return_as_str = False
    if output is None:
        output = io.StringIO()
        return_as_str = True
lines = []
votable = None
reset_vo_warnings()
with data.get_readable_fileobj(source, encoding='binary') as fd:
content = fd.read()
content_buffer = io.BytesIO(content)
content_buffer.seek(0)
if filename is None:
if isinstance(source, str):
filename = source
elif hasattr(source, 'name'):
filename = source.name
elif hasattr(source, 'url'):
filename = source.url
else:
filename = "<unknown>"
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always", exceptions.VOWarning, append=True)
try:
votable = parse(content_buffer, pedantic=False, filename=filename)
except ValueError as e:
lines.append(str(e))
lines = [str(x.message) for x in warning_lines if
issubclass(x.category, exceptions.VOWarning)] + lines
content_buffer.seek(0)
output.write("Validation report for {0}\n\n".format(filename))
if len(lines):
xml_lines = iterparser.xml_readlines(content_buffer)
for warning in lines:
w = exceptions.parse_vowarning(warning)
if not w['is_something']:
output.write(w['message'])
output.write('\n\n')
else:
line = xml_lines[w['nline'] - 1]
warning = w['warning']
if w['is_warning']:
color = 'yellow'
else:
color = 'red'
color_print(
'{0:d}: '.format(w['nline']), '',
warning or 'EXC', color,
': ', '',
textwrap.fill(
w['message'],
initial_indent=' ',
subsequent_indent=' ').lstrip(),
file=output)
print_code_line(line, w['nchar'], file=output)
output.write('\n')
else:
output.write('astropy.io.votable found no violations.\n\n')
success = 0
if xmllint and os.path.exists(filename):
from . import xmlutil
if votable is None:
version = "1.1"
else:
version = votable.version
success, stdout, stderr = xmlutil.validate_schema(
filename, version)
if success != 0:
output.write(
'xmllint schema violations:\n\n')
output.write(stderr.decode('utf-8'))
else:
output.write('xmllint passed\n')
if return_as_str:
return output.getvalue()
return len(lines) == 0 and success == 0
def from_table(table, table_id=None):
"""
Given an `~astropy.table.Table` object, return a
`~astropy.io.votable.tree.VOTableFile` file structure containing
just that single table.
Parameters
----------
table : `~astropy.table.Table` instance
table_id : str, optional
If not `None`, set the given id on the returned
`~astropy.io.votable.tree.Table` instance.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` instance
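
    Examples
    --------
    A minimal sketch, assuming ``t`` is an `astropy.table.Table` (the
    ID string is illustrative)::

        votable = from_table(t, table_id='my_table')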
"""
return tree.VOTableFile.from_table(table, table_id=table_id)
def is_votable(source):
"""
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
source : str or readable file-like object
Path or file object containing a VOTABLE_ xml file.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file.
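
    Examples
    --------
    A minimal sketch (``'data.xml'`` is a hypothetical filename)::

        if is_votable('data.xml'):
            votable = parse('data.xml')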
"""
try:
with iterparser.get_xml_iterator(source) as iterator:
for start, tag, d, pos in iterator:
if tag != 'xml':
return False
break
for start, tag, d, pos in iterator:
if tag != 'VOTABLE':
return False
break
return True
except ValueError:
return False
def reset_vo_warnings():
"""
Resets all of the vo warning state so that warnings that
have already been emitted will be emitted again. This is
used, for example, by `validate` which must emit all
warnings each time it is called.
"""
from . import converters, xmlutil
# -----------------------------------------------------------#
# This is a special variable used by the Python warnings #
# infrastructure to keep track of warnings that have #
# already been seen. Since we want to get every single #
# warning out of this, we have to delete all of them first. #
# -----------------------------------------------------------#
for module in (converters, exceptions, tree, xmlutil):
if hasattr(module, '__warningregistry__'):
del module.__warningregistry__
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import datetime
import os
import sys
import warnings
from contextlib import suppress
from inspect import signature, Parameter
import numpy as np
from .. import conf
from ..file import _File
from ..header import Header, _pad_length
from ..util import (_is_int, _is_pseudo_unsigned, _unsigned_zero,
itersubclasses, decode_ascii, _get_array_mmap, first,
_free_space_check, _extract_number)
from ..verify import _Verify, _ErrList
from ....utils import lazyproperty
from ....utils.exceptions import AstropyUserWarning
from ....utils.decorators import deprecated_renamed_argument
class _Delayed:
pass
DELAYED = _Delayed()
BITPIX2DTYPE = {8: 'uint8', 16: 'int16', 32: 'int32', 64: 'int64',
-32: 'float32', -64: 'float64'}
"""Maps FITS BITPIX values to Numpy dtype names."""
DTYPE2BITPIX = {'uint8': 8, 'int16': 16, 'uint16': 16, 'int32': 32,
'uint32': 32, 'int64': 64, 'uint64': 64, 'float32': -32,
'float64': -64}
"""
Maps Numpy dtype names to FITS BITPIX values (this includes unsigned
integers, with the assumption that the pseudo-unsigned integer convention
will be used in this case).
"""
class InvalidHDUException(Exception):
"""
A custom exception class used mainly to signal to _BaseHDU.__new__ that
an HDU cannot possibly be considered valid, and must be assumed to be
corrupted.
"""
def _hdu_class_from_header(cls, header):
"""
Used primarily by _BaseHDU.__new__ to find an appropriate HDU class to use
based on values in the header. See the _BaseHDU.__new__ docstring.
"""
klass = cls # By default, if no subclasses are defined
if header:
for c in reversed(list(itersubclasses(cls))):
try:
# HDU classes built into astropy.io.fits are always considered,
# but extension HDUs must be explicitly registered
if not (c.__module__.startswith('astropy.io.fits.') or
c in cls._hdu_registry):
continue
if c.match_header(header):
klass = c
break
except NotImplementedError:
continue
except Exception as exc:
warnings.warn(
'An exception occurred matching an HDU header to the '
'appropriate HDU type: {0}'.format(exc),
AstropyUserWarning)
warnings.warn('The HDU will be treated as corrupted.',
AstropyUserWarning)
klass = _CorruptedHDU
del exc
break
return klass
class _BaseHDUMeta(type):
def __init__(cls, name, bases, members):
# The sole purpose of this metaclass right now is to add the same
# data.deleter to all HDUs with a data property.
# It's unfortunate, but there's otherwise no straightforward way
# that a property can inherit setters/deleters of the property of the
# same name on base classes
if 'data' in members:
data_prop = members['data']
if (isinstance(data_prop, (lazyproperty, property)) and
data_prop.fdel is None):
# Don't do anything if the class has already explicitly
# set the deleter for its data property
def data(self):
# The deleter
if self._file is not None and self._data_loaded:
data_refcount = sys.getrefcount(self.data)
# Manually delete *now* so that FITS_rec.__del__
# cleanup can happen if applicable
del self.__dict__['data']
# Don't even do this unless the *only* reference to the
# .data array was the one we're deleting by deleting
# this attribute; if any other references to the array
# are hanging around (perhaps the user ran ``data =
# hdu.data``) don't even consider this:
if data_refcount == 2:
self._file._maybe_close_mmap()
setattr(cls, 'data', data_prop.deleter(data))
# TODO: Come up with a better __repr__ for HDUs (and for HDULists, for that
# matter)
class _BaseHDU(metaclass=_BaseHDUMeta):
"""Base class for all HDU (header data unit) classes."""
_hdu_registry = set()
# This HDU type is part of the FITS standard
_standard = True
# Byte to use for padding out blocks
_padding_byte = '\x00'
_default_name = ''
def __new__(cls, data=None, header=None, *args, **kwargs):
"""
Iterates through the subclasses of _BaseHDU and uses that class's
match_header() method to determine which subclass to instantiate.
It's important to be aware that the class hierarchy is traversed in a
depth-last order. Each match_header() should identify an HDU type as
uniquely as possible. Abstract types may choose to simply return False
or raise NotImplementedError to be skipped.
If any unexpected exceptions are raised while evaluating
match_header(), the type is taken to be _CorruptedHDU.
"""
klass = _hdu_class_from_header(cls, header)
return super().__new__(klass)
def __init__(self, data=None, header=None, *args, **kwargs):
if header is None:
header = Header()
self._header = header
self._file = None
self._buffer = None
self._header_offset = None
self._data_offset = None
self._data_size = None
# This internal variable is used to track whether the data attribute
# still points to the same data array as when the HDU was originally
# created (this does not track whether the data is actually the same
# content-wise)
self._data_replaced = False
self._data_needs_rescale = False
self._new = True
self._output_checksum = False
if 'DATASUM' in self._header and 'CHECKSUM' not in self._header:
self._output_checksum = 'datasum'
elif 'CHECKSUM' in self._header:
self._output_checksum = True
@property
def header(self):
return self._header
@header.setter
def header(self, value):
self._header = value
@property
def name(self):
# Convert the value to a string to be flexible in some pathological
# cases (see ticket #96)
return str(self._header.get('EXTNAME', self._default_name))
@name.setter
def name(self, value):
if not isinstance(value, str):
raise TypeError("'name' attribute must be a string")
if not conf.extension_name_case_sensitive:
value = value.upper()
if 'EXTNAME' in self._header:
self._header['EXTNAME'] = value
else:
self._header['EXTNAME'] = (value, 'extension name')
@property
def ver(self):
return self._header.get('EXTVER', 1)
@ver.setter
def ver(self, value):
if not _is_int(value):
raise TypeError("'ver' attribute must be an integer")
if 'EXTVER' in self._header:
self._header['EXTVER'] = value
else:
self._header['EXTVER'] = (value, 'extension value')
@property
def level(self):
return self._header.get('EXTLEVEL', 1)
@level.setter
def level(self, value):
if not _is_int(value):
raise TypeError("'level' attribute must be an integer")
if 'EXTLEVEL' in self._header:
self._header['EXTLEVEL'] = value
else:
self._header['EXTLEVEL'] = (value, 'extension level')
@property
def is_image(self):
return (
self.name == 'PRIMARY' or
('XTENSION' in self._header and
(self._header['XTENSION'] == 'IMAGE' or
(self._header['XTENSION'] == 'BINTABLE' and
'ZIMAGE' in self._header and self._header['ZIMAGE'] is True))))
@property
def _data_loaded(self):
return ('data' in self.__dict__ and self.data is not DELAYED)
@property
def _has_data(self):
return self._data_loaded and self.data is not None
@classmethod
def register_hdu(cls, hducls):
cls._hdu_registry.add(hducls)
@classmethod
def unregister_hdu(cls, hducls):
if hducls in cls._hdu_registry:
cls._hdu_registry.remove(hducls)
@classmethod
def match_header(cls, header):
raise NotImplementedError
@classmethod
def fromstring(cls, data, checksum=False, ignore_missing_end=False,
**kwargs):
"""
Creates a new HDU object of the appropriate type from a string
containing the HDU's entire header and, optionally, its data.
Note: When creating a new HDU from a string without a backing file
object, the data of that HDU may be read-only. It depends on whether
the underlying string was an immutable Python str/bytes object, or some
kind of read-write memory buffer such as a `memoryview`.
Parameters
----------
data : str, bytearray, memoryview, ndarray
A byte string containing the HDU's header and data.
checksum : bool, optional
Check the HDU's checksum and/or datasum.
ignore_missing_end : bool, optional
Ignore a missing end card in the header data. Note that without the
            end card the end of the header may be ambiguous and result in a
            corrupt HDU. In this case the assumption is that the first 2880-byte
block that does not begin with valid FITS header data is the
beginning of the data.
kwargs : optional
May consist of additional keyword arguments specific to an HDU
type--these correspond to keywords recognized by the constructors of
different HDU classes such as `PrimaryHDU`, `ImageHDU`, or
`BinTableHDU`. Any unrecognized keyword arguments are simply
ignored.
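
        Examples
        --------
        A minimal sketch, assuming ``buf`` is a `bytes` object holding a
        complete HDU (header plus padded data) and ``PrimaryHDU`` comes
        from `astropy.io.fits`::

            hdu = PrimaryHDU.fromstring(buf)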
"""
return cls._readfrom_internal(data, checksum=checksum,
ignore_missing_end=ignore_missing_end,
**kwargs)
@classmethod
def readfrom(cls, fileobj, checksum=False, ignore_missing_end=False,
**kwargs):
"""
Read the HDU from a file. Normally an HDU should be opened with
:func:`open` which reads the entire HDU list in a FITS file. But this
method is still provided for symmetry with :func:`writeto`.
Parameters
----------
fileobj : file object or file-like object
Input FITS file. The file's seek pointer is assumed to be at the
beginning of the HDU.
checksum : bool
If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card
values (when present in the HDU header) match the header and data
of all HDU's in the file.
ignore_missing_end : bool
Do not issue an exception when opening a file that is missing an
``END`` card in the last header.
"""
# TODO: Figure out a way to make it possible for the _File
# constructor to be a noop if the argument is already a _File
if not isinstance(fileobj, _File):
fileobj = _File(fileobj)
hdu = cls._readfrom_internal(fileobj, checksum=checksum,
ignore_missing_end=ignore_missing_end,
**kwargs)
# If the checksum had to be checked the data may have already been read
# from the file, in which case we don't want to seek relative
fileobj.seek(hdu._data_offset + hdu._data_size, os.SEEK_SET)
return hdu
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def writeto(self, name, output_verify='exception', overwrite=False,
checksum=False):
"""
Write the HDU to a new file. This is a convenience method to
provide a user easier output interface if only one HDU needs
to be written to a file.
Parameters
----------
name : file path, file object or file-like object
Output FITS file. If the file object is already opened, it must
be opened in a writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the header of the HDU when written to the file.
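
        Examples
        --------
        A minimal sketch (``'new.fits'`` is a hypothetical filename)::

            hdu.writeto('new.fits', overwrite=True, checksum=True)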
"""
from .hdulist import HDUList
hdulist = HDUList([self])
hdulist.writeto(name, output_verify, overwrite=overwrite,
checksum=checksum)
@classmethod
def _readfrom_internal(cls, data, header=None, checksum=False,
ignore_missing_end=False, **kwargs):
"""
Provides the bulk of the internal implementation for readfrom and
fromstring.
For some special cases, supports using a header that was already
created, and just using the input data for the actual array data.
"""
hdu_buffer = None
hdu_fileobj = None
header_offset = 0
if isinstance(data, _File):
if header is None:
header_offset = data.tell()
header = Header.fromfile(data, endcard=not ignore_missing_end)
hdu_fileobj = data
data_offset = data.tell() # *after* reading the header
else:
try:
# Test that the given object supports the buffer interface by
# ensuring an ndarray can be created from it
np.ndarray((), dtype='ubyte', buffer=data)
except TypeError:
raise TypeError(
'The provided object {!r} does not contain an underlying '
'memory buffer. fromstring() requires an object that '
'supports the buffer interface such as bytes, buffer, '
'memoryview, ndarray, etc. This restriction is to ensure '
'that efficient access to the array/table data is possible.'
.format(data))
if header is None:
def block_iter(nbytes):
idx = 0
while idx < len(data):
yield data[idx:idx + nbytes]
idx += nbytes
header_str, header = Header._from_blocks(
block_iter, True, '', not ignore_missing_end, True)
if len(data) > len(header_str):
hdu_buffer = data
elif data:
hdu_buffer = data
header_offset = 0
data_offset = len(header_str)
# Determine the appropriate arguments to pass to the constructor from
# self._kwargs. self._kwargs contains any number of optional arguments
# that may or may not be valid depending on the HDU type
cls = _hdu_class_from_header(cls, header)
sig = signature(cls.__init__)
new_kwargs = kwargs.copy()
if Parameter.VAR_KEYWORD not in (x.kind for x in sig.parameters.values()):
# If __init__ accepts arbitrary keyword arguments, then we can go
# ahead and pass all keyword arguments; otherwise we need to delete
# any that are invalid
for key in kwargs:
if key not in sig.parameters:
del new_kwargs[key]
hdu = cls(data=DELAYED, header=header, **new_kwargs)
# One of these may be None, depending on whether the data came from a
# file or a string buffer--later this will be further abstracted
hdu._file = hdu_fileobj
hdu._buffer = hdu_buffer
hdu._header_offset = header_offset # beginning of the header area
hdu._data_offset = data_offset # beginning of the data area
# data area size, including padding
size = hdu.size
hdu._data_size = size + _pad_length(size)
# Checksums are not checked on invalid HDU types
if checksum and checksum != 'remove' and isinstance(hdu, _ValidHDU):
hdu._verify_checksum_datasum()
return hdu
def _get_raw_data(self, shape, code, offset):
"""
Return raw array from either the HDU's memory buffer or underlying
file.
"""
if isinstance(shape, int):
shape = (shape,)
if self._buffer:
return np.ndarray(shape, dtype=code, buffer=self._buffer,
offset=offset)
elif self._file:
return self._file.readarray(offset=offset, dtype=code, shape=shape)
else:
return None
# TODO: Rework checksum handling so that it's not necessary to add a
# checksum argument here
# TODO: The BaseHDU class shouldn't even handle checksums since they're
# only implemented on _ValidHDU...
def _prewriteto(self, checksum=False, inplace=False):
self._update_uint_scale_keywords()
# Handle checksum
self._update_checksum(checksum)
def _update_uint_scale_keywords(self):
"""
If the data is unsigned int 16, 32, or 64 add BSCALE/BZERO cards to
header.
"""
if (self._has_data and self._standard and
_is_pseudo_unsigned(self.data.dtype)):
# CompImageHDUs need TFIELDS immediately after GCOUNT,
# so BSCALE has to go after TFIELDS if it exists.
if 'TFIELDS' in self._header:
self._header.set('BSCALE', 1, after='TFIELDS')
elif 'GCOUNT' in self._header:
self._header.set('BSCALE', 1, after='GCOUNT')
else:
self._header.set('BSCALE', 1)
self._header.set('BZERO', _unsigned_zero(self.data.dtype),
after='BSCALE')
def _update_checksum(self, checksum, checksum_keyword='CHECKSUM',
datasum_keyword='DATASUM'):
"""Update the 'CHECKSUM' and 'DATASUM' keywords in the header (or
keywords with equivalent semantics given by the ``checksum_keyword``
and ``datasum_keyword`` arguments--see for example ``CompImageHDU``
for an example of why this might need to be overridden).
"""
# If the data is loaded it isn't necessarily 'modified', but we have no
# way of knowing for sure
modified = self._header._modified or self._data_loaded
if checksum == 'remove':
if checksum_keyword in self._header:
del self._header[checksum_keyword]
if datasum_keyword in self._header:
del self._header[datasum_keyword]
elif (modified or self._new or
(checksum and ('CHECKSUM' not in self._header or
'DATASUM' not in self._header or
not self._checksum_valid or
not self._datasum_valid))):
if checksum == 'datasum':
self.add_datasum(datasum_keyword=datasum_keyword)
elif checksum:
self.add_checksum(checksum_keyword=checksum_keyword,
datasum_keyword=datasum_keyword)
def _postwriteto(self):
# If data is unsigned integer 16, 32 or 64, remove the
# BSCALE/BZERO cards
if (self._has_data and self._standard and
_is_pseudo_unsigned(self.data.dtype)):
for keyword in ('BSCALE', 'BZERO'):
with suppress(KeyError):
del self._header[keyword]
def _writeheader(self, fileobj):
offset = 0
if not fileobj.simulateonly:
with suppress(AttributeError, OSError):
offset = fileobj.tell()
self._header.tofile(fileobj)
try:
size = fileobj.tell() - offset
except (AttributeError, OSError):
size = len(str(self._header))
else:
size = len(str(self._header))
return offset, size
def _writedata(self, fileobj):
# TODO: A lot of the simulateonly stuff should be moved back into the
# _File class--basically it should turn write and flush into a noop
offset = 0
size = 0
if not fileobj.simulateonly:
fileobj.flush()
try:
offset = fileobj.tell()
except OSError:
offset = 0
if self._data_loaded or self._data_needs_rescale:
if self.data is not None:
size += self._writedata_internal(fileobj)
# pad the FITS data block
if size > 0:
padding = _pad_length(size) * self._padding_byte
# TODO: Not that this is ever likely, but if for some odd
# reason _padding_byte is > 0x80 this will fail; but really if
# somebody's custom fits format is doing that, they're doing it
# wrong and should be reprimanded harshly.
fileobj.write(padding.encode('ascii'))
size += len(padding)
else:
                # The data has not been modified or does not need to be
# rescaled, so it can be copied, unmodified, directly from an
# existing file or buffer
size += self._writedata_direct_copy(fileobj)
# flush, to make sure the content is written
if not fileobj.simulateonly:
fileobj.flush()
# return both the location and the size of the data area
return offset, size
def _writedata_internal(self, fileobj):
"""
The beginning and end of most _writedata() implementations are the
same, but the details of writing the data array itself can vary between
HDU types, so that should be implemented in this method.
Should return the size in bytes of the data written.
"""
if not fileobj.simulateonly:
fileobj.writearray(self.data)
return self.data.size * self.data.itemsize
def _writedata_direct_copy(self, fileobj):
"""Copies the data directly from one file/buffer to the new file.
For now this is handled by loading the raw data from the existing data
(including any padding) via a memory map or from an already in-memory
buffer and using Numpy's existing file-writing facilities to write to
the new file.
If this proves too slow a more direct approach may be used.
"""
raw = self._get_raw_data(self._data_size, 'ubyte', self._data_offset)
if raw is not None:
fileobj.writearray(raw)
return raw.nbytes
else:
return 0
# TODO: This is the start of moving HDU writing out of the _File class;
# Though right now this is an internal private method (though still used by
# HDUList, eventually the plan is to have this be moved into writeto()
# somehow...
def _writeto(self, fileobj, inplace=False, copy=False):
try:
dirname = os.path.dirname(fileobj._file.name)
except AttributeError:
dirname = None
with _free_space_check(self, dirname):
self._writeto_internal(fileobj, inplace, copy)
def _writeto_internal(self, fileobj, inplace, copy):
# For now fileobj is assumed to be a _File object
if not inplace or self._new:
header_offset, _ = self._writeheader(fileobj)
data_offset, data_size = self._writedata(fileobj)
# Set the various data location attributes on newly-written HDUs
if self._new:
self._header_offset = header_offset
self._data_offset = data_offset
self._data_size = data_size
return
hdrloc = self._header_offset
hdrsize = self._data_offset - self._header_offset
datloc = self._data_offset
datsize = self._data_size
if self._header._modified:
# Seek to the original header location in the file
self._file.seek(hdrloc)
            # This should update hdrloc with the header location in the new file
hdrloc, hdrsize = self._writeheader(fileobj)
# If the data is to be written below with self._writedata, that
# will also properly update the data location; but it should be
# updated here too
datloc = hdrloc + hdrsize
elif copy:
# Seek to the original header location in the file
self._file.seek(hdrloc)
# Before writing, update the hdrloc with the current file position,
# which is the hdrloc for the new file
hdrloc = fileobj.tell()
fileobj.write(self._file.read(hdrsize))
# The header size is unchanged, but the data location may be
# different from before depending on if previous HDUs were resized
datloc = fileobj.tell()
if self._data_loaded:
if self.data is not None:
                # Seek through the array's bases for a memmap'd array; we
# can't rely on the _File object to give us this info since
# the user may have replaced the previous mmap'd array
if copy or self._data_replaced:
# Of course, if we're copying the data to a new file
# we don't care about flushing the original mmap;
# instead just read it into the new file
array_mmap = None
else:
array_mmap = _get_array_mmap(self.data)
if array_mmap is not None:
array_mmap.flush()
else:
self._file.seek(self._data_offset)
datloc, datsize = self._writedata(fileobj)
elif copy:
datsize = self._writedata_direct_copy(fileobj)
self._header_offset = hdrloc
self._data_offset = datloc
self._data_size = datsize
self._data_replaced = False
def _close(self, closed=True):
# If the data was mmap'd, close the underlying mmap (this will
# prevent any future access to the .data attribute if there are
        # no other references to it; if there are other references then
        # it is up to the user to clean those up)
if (closed and self._data_loaded and
_get_array_mmap(self.data) is not None):
del self.data
# For backwards-compatibility, though nobody should have
# been using this directly:
_AllHDU = _BaseHDU
# For convenience...
# TODO: register_hdu could be made into a class decorator which would be pretty
# cool, but only once 2.6 support is dropped.
register_hdu = _BaseHDU.register_hdu
unregister_hdu = _BaseHDU.unregister_hdu
class _CorruptedHDU(_BaseHDU):
"""
A Corrupted HDU class.
This class is used when one or more mandatory `Card`s are
corrupted (unparsable), such as the ``BITPIX``, ``NAXIS``, or
``END`` cards. A corrupted HDU usually means that the data size
cannot be calculated or the ``END`` card is not found. In the case
of a missing ``END`` card, the `Header` may also contain the binary
    data.
.. note::
In future, it may be possible to decipher where the last block
of the `Header` ends, but this task may be difficult when the
extension is a `TableHDU` containing ASCII data.
"""
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
# Note: On compressed files this might report a negative size; but the
# file is corrupt anyways so I'm not too worried about it.
if self._buffer is not None:
return len(self._buffer) - self._data_offset
return self._file.size - self._data_offset
def _summary(self):
return (self.name, self.ver, 'CorruptedHDU')
def verify(self):
pass
class _NonstandardHDU(_BaseHDU, _Verify):
"""
A Non-standard HDU class.
This class is used for a Primary HDU when the ``SIMPLE`` Card has
a value of `False`. A non-standard HDU comes from a file that
resembles a FITS file but departs from the standards in some
significant way. One example would be files where the numbers are
in the DEC VAX internal storage format rather than the standard
FITS most significant byte first. The header for this HDU should
be valid. The data for this HDU is read from the file as a byte
stream that begins at the first byte after the header ``END`` card
and continues until the end of the file.
"""
_standard = False
@classmethod
def match_header(cls, header):
"""
Matches any HDU that has the 'SIMPLE' keyword but is not a standard
Primary or Groups HDU.
"""
# The SIMPLE keyword must be in the first card
card = header.cards[0]
# The check that 'GROUPS' is missing is a bit redundant, since the
# match_header for GroupsHDU will always be called before this one.
if card.keyword == 'SIMPLE':
if 'GROUPS' not in header and card.value is False:
return True
else:
raise InvalidHDUException
else:
return False
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
if self._buffer is not None:
return len(self._buffer) - self._data_offset
return self._file.size - self._data_offset
def _writedata(self, fileobj):
"""
Differs from the base class :class:`_writedata` in that it doesn't
automatically add padding, and treats the data as a string of raw bytes
instead of an array.
"""
offset = 0
size = 0
if not fileobj.simulateonly:
fileobj.flush()
try:
offset = fileobj.tell()
except OSError:
offset = 0
if self.data is not None:
if not fileobj.simulateonly:
fileobj.write(self.data)
# flush, to make sure the content is written
fileobj.flush()
size = len(self.data)
# return both the location and the size of the data area
return offset, size
def _summary(self):
return (self.name, self.ver, 'NonstandardHDU', len(self._header))
@lazyproperty
def data(self):
"""
Return the file data.
"""
return self._get_raw_data(self.size, 'ubyte', self._data_offset)
def _verify(self, option='warn'):
errs = _ErrList([], unit='Card')
# verify each card
for card in self._header.cards:
errs.append(card._verify(option))
return errs
class _ValidHDU(_BaseHDU, _Verify):
"""
Base class for all HDUs which are not corrupted.
"""
def __init__(self, data=None, header=None, name=None, ver=None, **kwargs):
super().__init__(data=data, header=header)
# NOTE: private data members _checksum and _datasum are used by the
# utility script "fitscheck" to detect missing checksums.
self._checksum = None
self._checksum_valid = None
self._datasum = None
self._datasum_valid = None
if name is not None:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
Matches any HDU that is not recognized as having either the SIMPLE or
XTENSION keyword in its header's first card, but is nonetheless not
corrupted.
TODO: Maybe it would make more sense to use _NonstandardHDU in this
case? Not sure...
"""
return first(header.keys()) not in ('SIMPLE', 'XTENSION')
@property
def size(self):
"""
Size (in bytes) of the data portion of the HDU.
"""
size = 0
naxis = self._header.get('NAXIS', 0)
if naxis > 0:
size = 1
for idx in range(naxis):
size = size * self._header['NAXIS' + str(idx + 1)]
bitpix = self._header['BITPIX']
gcount = self._header.get('GCOUNT', 1)
pcount = self._header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
def filebytes(self):
"""
Calculates and returns the number of bytes that this HDU will write to
a file.
"""
f = _File()
# TODO: Fix this once new HDU writing API is settled on
return self._writeheader(f)[1] + self._writedata(f)[1]
def fileinfo(self):
"""
Returns a dictionary detailing information about the locations
of this HDU within any associated file. The values are only
valid after a read or write of the associated file with no
intervening changes to the `HDUList`.
Returns
-------
dict or None
The dictionary details information about the locations of
this HDU within an associated file. Returns `None` when
the HDU is not associated with a file.
Dictionary contents:
========== ================================================
Key Value
========== ================================================
file File object associated with the HDU
filemode Mode in which the file was opened (readonly, copyonwrite,
update, append, ostream)
hdrLoc Starting byte location of header in file
datLoc Starting byte location of data block in file
datSpan Data size including padding
========== ================================================
"""
if hasattr(self, '_file') and self._file:
return {'file': self._file, 'filemode': self._file.mode,
'hdrLoc': self._header_offset, 'datLoc': self._data_offset,
'datSpan': self._data_size}
else:
return None
def copy(self):
"""
Make a copy of the HDU, both header and data are copied.
"""
if self.data is not None:
data = self.data.copy()
else:
data = None
return self.__class__(data=data, header=self._header.copy())
def _verify(self, option='warn'):
errs = _ErrList([], unit='Card')
is_valid = BITPIX2DTYPE.__contains__
# Verify location and value of mandatory keywords.
# Do the first card here, instead of in the respective HDU classes, so
# the checking is in order, in case of required cards in wrong order.
if isinstance(self, ExtensionHDU):
firstkey = 'XTENSION'
firstval = self._extension
else:
firstkey = 'SIMPLE'
firstval = True
self.req_cards(firstkey, 0, None, firstval, option, errs)
self.req_cards('BITPIX', 1, lambda v: (_is_int(v) and is_valid(v)), 8,
option, errs)
self.req_cards('NAXIS', 2,
lambda v: (_is_int(v) and 0 <= v <= 999), 0,
option, errs)
naxis = self._header.get('NAXIS', 0)
if naxis < 1000:
for ax in range(3, naxis + 3):
key = 'NAXIS' + str(ax - 2)
self.req_cards(key, ax,
lambda v: (_is_int(v) and v >= 0),
_extract_number(self._header[key], default=1),
option, errs)
# Remove NAXISj cards where j is not in range 1, naxis inclusive.
for keyword in self._header:
if keyword.startswith('NAXIS') and len(keyword) > 5:
try:
number = int(keyword[5:])
if number <= 0 or number > naxis:
raise ValueError
except ValueError:
err_text = ("NAXISj keyword out of range ('{}' when "
"NAXIS == {})".format(keyword, naxis))
def fix(self=self, keyword=keyword):
del self._header[keyword]
errs.append(
self.run_option(option=option, err_text=err_text,
fix=fix, fix_text="Deleted."))
# Verify that the EXTNAME keyword exists and is a string
if 'EXTNAME' in self._header:
if not isinstance(self._header['EXTNAME'], str):
err_text = 'The EXTNAME keyword must have a string value.'
fix_text = 'Converted the EXTNAME keyword to a string value.'
def fix(header=self._header):
header['EXTNAME'] = str(header['EXTNAME'])
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
# verify each card
for card in self._header.cards:
errs.append(card._verify(option))
return errs
# TODO: Improve this API a little bit--for one, most of these arguments
# could be optional
def req_cards(self, keyword, pos, test, fix_value, option, errlist):
"""
Check the existence, location, and value of a required `Card`.
Parameters
----------
keyword : str
The keyword to validate
pos : int, callable
If an ``int``, this specifies the exact location this card should
have in the header. Remember that Python is zero-indexed, so this
means ``pos=0`` requires the card to be the first card in the
header. If given a callable, it should take one argument--the
actual position of the keyword--and return `True` or `False`. This
can be used for custom evaluation. For example if
``pos=lambda idx: idx > 10`` this will check that the keyword's
index is greater than 10.
test : callable
This should be a callable (generally a function) that is passed the
value of the given keyword and returns `True` or `False`. This can
be used to validate the value associated with the given keyword.
fix_value : str, int, float, complex, bool, None
            A valid value for a FITS keyword to use if the given ``test``
            fails, to replace an invalid value. In other words, this provides
a default value to use as a replacement if the keyword's current
value is invalid. If `None`, there is no replacement value and the
keyword is unfixable.
option : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
errlist : list
A list of validation errors already found in the FITS file; this is
used primarily for the validation system to collect errors across
multiple HDUs and multiple calls to `req_cards`.
Notes
-----
If ``pos=None``, the card can be anywhere in the header. If the card
does not exist, the new card will have the ``fix_value`` as its value
        when created. The card's value is also checked using the ``test``
        argument.
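        Examples
        --------
        A minimal sketch (the keyword, position, and fix value here are
        illustrative, not requirements of any particular HDU type)::
            errs = _ErrList([], unit='Card')
            # Require BITPIX as the second card (index 1) with an integer
            # value, falling back to 8 if the current value is invalid.
            hdu.req_cards('BITPIX', 1, _is_int, 8, 'fix', errs)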
"""
errs = errlist
fix = None
try:
index = self._header.index(keyword)
except ValueError:
index = None
fixable = fix_value is not None
insert_pos = len(self._header) + 1
# If pos is an int, insert at the given position (and convert it to a
# lambda)
if _is_int(pos):
insert_pos = pos
pos = lambda x: x == insert_pos
# if the card does not exist
if index is None:
err_text = "'{}' card does not exist.".format(keyword)
fix_text = "Fixed by inserting a new '{}' card.".format(keyword)
if fixable:
# use repr to accommodate both string and non-string types
# Boolean is also OK in this constructor
card = (keyword, fix_value)
def fix(self=self, insert_pos=insert_pos, card=card):
self._header.insert(insert_pos, card)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix, fixable=fixable))
else:
# if the supposed location is specified
if pos is not None:
if not pos(index):
err_text = ("'{}' card at the wrong place "
"(card {}).".format(keyword, index))
fix_text = ("Fixed by moving it to the right place "
"(card {}).".format(insert_pos))
def fix(self=self, index=index, insert_pos=insert_pos):
card = self._header.cards[index]
del self._header[index]
self._header.insert(insert_pos, card)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
# if value checking is specified
if test:
val = self._header[keyword]
if not test(val):
err_text = ("'{}' card has invalid value '{}'.".format(
keyword, val))
fix_text = ("Fixed by setting a new value '{}'.".format(
fix_value))
if fixable:
def fix(self=self, keyword=keyword, val=fix_value):
self._header[keyword] = fix_value
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix, fixable=fixable))
return errs
def add_datasum(self, when=None, datasum_keyword='DATASUM'):
"""
Add the ``DATASUM`` card to this HDU with the value set to the
checksum calculated for the data.
Parameters
----------
when : str, optional
Comment string for the card that by default represents the
time when the checksum was calculated
datasum_keyword : str, optional
The name of the header keyword to store the datasum value in;
this is typically 'DATASUM' per convention, but there exist
use cases in which a different keyword should be used
Returns
-------
checksum : int
The calculated datasum
Notes
-----
For testing purposes, provide a ``when`` argument to enable the comment
value in the card to remain consistent. This will enable the
generation of a ``CHECKSUM`` card with a consistent value.
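        Examples
        --------
        A sketch; the comment text is arbitrary::
            hdu.add_datasum(when='checksum for regression test')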
"""
cs = self._calculate_datasum()
if when is None:
when = 'data unit checksum updated {}'.format(self._get_timestamp())
self._header[datasum_keyword] = (str(cs), when)
return cs
def add_checksum(self, when=None, override_datasum=False,
checksum_keyword='CHECKSUM', datasum_keyword='DATASUM'):
"""
Add the ``CHECKSUM`` and ``DATASUM`` cards to this HDU with
the values set to the checksum calculated for the HDU and the
data respectively. The addition of the ``DATASUM`` card may
be overridden.
Parameters
----------
when : str, optional
comment string for the cards; by default the comments
will represent the time when the checksum was calculated
override_datasum : bool, optional
add the ``CHECKSUM`` card only
checksum_keyword : str, optional
The name of the header keyword to store the checksum value in; this
is typically 'CHECKSUM' per convention, but there exist use cases
in which a different keyword should be used
datasum_keyword : str, optional
See ``checksum_keyword``
Notes
-----
For testing purposes, first call `add_datasum` with a ``when``
argument, then call `add_checksum` with a ``when`` argument and
``override_datasum`` set to `True`. This will provide consistent
comments for both cards and enable the generation of a ``CHECKSUM``
card with a consistent value.
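        Examples
        --------
        Following the recipe above for a reproducible ``CHECKSUM`` (the
        ``when`` string is arbitrary)::
            when = 'checksum for regression test'
            hdu.add_datasum(when)
            hdu.add_checksum(when, override_datasum=True)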
"""
if not override_datasum:
# Calculate and add the data checksum to the header.
data_cs = self.add_datasum(when, datasum_keyword=datasum_keyword)
else:
# Just calculate the data checksum
data_cs = self._calculate_datasum()
if when is None:
when = 'HDU checksum updated {}'.format(self._get_timestamp())
# Add the CHECKSUM card to the header with a value of all zeros.
if datasum_keyword in self._header:
self._header.set(checksum_keyword, '0' * 16, when,
before=datasum_keyword)
else:
self._header.set(checksum_keyword, '0' * 16, when)
csum = self._calculate_checksum(data_cs,
checksum_keyword=checksum_keyword)
self._header[checksum_keyword] = csum
def verify_datasum(self):
"""
Verify that the value in the ``DATASUM`` keyword matches the value
calculated for the ``DATASUM`` of the current HDU data.
Returns
-------
valid : int
- 0 - failure
- 1 - success
- 2 - no ``DATASUM`` keyword present
"""
if 'DATASUM' in self._header:
datasum = self._calculate_datasum()
if datasum == int(self._header['DATASUM']):
return 1
else:
# Failed
return 0
else:
return 2
def verify_checksum(self):
"""
Verify that the value in the ``CHECKSUM`` keyword matches the
value calculated for the current HDU CHECKSUM.
Returns
-------
valid : int
- 0 - failure
- 1 - success
- 2 - no ``CHECKSUM`` keyword present
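        Examples
        --------
        A sketch of interpreting the result::
            status = hdu.verify_checksum()
            if status == 0:
                print('CHECKSUM does not match the header and data')
            elif status == 2:
                print('no CHECKSUM keyword present to verify')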
"""
if 'CHECKSUM' in self._header:
if 'DATASUM' in self._header:
datasum = self._calculate_datasum()
else:
datasum = 0
checksum = self._calculate_checksum(datasum)
if checksum == self._header['CHECKSUM']:
return 1
else:
# Failed
return 0
else:
return 2
def _verify_checksum_datasum(self):
"""
Verify the checksum/datasum values if the cards exist in the header.
        Simply displays warnings if either the checksum or the datasum
        doesn't match.
"""
if 'CHECKSUM' in self._header:
self._checksum = self._header['CHECKSUM']
self._checksum_valid = self.verify_checksum()
if not self._checksum_valid:
warnings.warn(
'Checksum verification failed for HDU {0}.\n'.format(
(self.name, self.ver)), AstropyUserWarning)
if 'DATASUM' in self._header:
self._datasum = self._header['DATASUM']
self._datasum_valid = self.verify_datasum()
if not self._datasum_valid:
warnings.warn(
'Datasum verification failed for HDU {0}.\n'.format(
(self.name, self.ver)), AstropyUserWarning)
def _get_timestamp(self):
"""
Return the current timestamp in ISO 8601 format, with microseconds
stripped off.
Ex.: 2007-05-30T19:05:11
"""
return datetime.datetime.now().isoformat()[:19]
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if not self._data_loaded:
# This is the case where the data has not been read from the file
# yet. We find the data in the file, read it, and calculate the
# datasum.
if self.size > 0:
raw_data = self._get_raw_data(self._data_size, 'ubyte',
self._data_offset)
return self._compute_checksum(raw_data)
else:
return 0
elif self.data is not None:
return self._compute_checksum(self.data.view('ubyte'))
else:
return 0
def _calculate_checksum(self, datasum, checksum_keyword='CHECKSUM'):
"""
Calculate the value of the ``CHECKSUM`` card in the HDU.
"""
old_checksum = self._header[checksum_keyword]
self._header[checksum_keyword] = '0' * 16
# Convert the header to bytes.
s = self._header.tostring().encode('utf8')
# Calculate the checksum of the Header and data.
cs = self._compute_checksum(np.frombuffer(s, dtype='ubyte'), datasum)
# Encode the checksum into a string.
s = self._char_encode(~cs)
# Return the header card value.
self._header[checksum_keyword] = old_checksum
return s
def _compute_checksum(self, data, sum32=0):
"""
Compute the ones-complement checksum of a sequence of bytes.
Parameters
----------
data
a memory region to checksum
sum32
incremental checksum value from another region
Returns
-------
ones complement checksum
"""
blocklen = 2880
sum32 = np.uint32(sum32)
for i in range(0, len(data), blocklen):
            length = min(blocklen, len(data) - i)  # final block may be partial
sum32 = self._compute_hdu_checksum(data[i:i + length], sum32)
return sum32
def _compute_hdu_checksum(self, data, sum32=0):
"""
Translated from FITS Checksum Proposal by Seaman, Pence, and Rots.
Use uint32 literals as a hedge against type promotion to int64.
        This code should only be called with blocks of 2880 bytes.
        Longer blocks result in non-standard checksums with carry overflow.
        Historically, this code *was* called with larger blocks and for that
        reason still needs to be for backward compatibility.
"""
u8 = np.uint32(8)
u16 = np.uint32(16)
uFFFF = np.uint32(0xFFFF)
if data.nbytes % 2:
last = data[-1]
data = data[:-1]
else:
last = np.uint32(0)
data = data.view('>u2')
hi = sum32 >> u16
lo = sum32 & uFFFF
hi += np.add.reduce(data[0::2], dtype=np.uint64)
lo += np.add.reduce(data[1::2], dtype=np.uint64)
if (data.nbytes // 2) % 2:
lo += last << u8
else:
hi += last << u8
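        # Fold any overflow above 16 bits back into the opposite half
        # (ones-complement arithmetic). For example, hi = 0x10002 yields
        # hicarry = 1, which is added back into lo on the next pass;
        # iterate until both carries are zero.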
hicarry = hi >> u16
locarry = lo >> u16
while hicarry or locarry:
hi = (hi & uFFFF) + locarry
lo = (lo & uFFFF) + hicarry
hicarry = hi >> u16
locarry = lo >> u16
return (hi << u16) + lo
# _MASK and _EXCLUDE used for encoding the checksum value into a character
# string.
_MASK = [0xFF000000,
0x00FF0000,
0x0000FF00,
0x000000FF]
_EXCLUDE = [0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40,
0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60]
def _encode_byte(self, byte):
"""
Encode a single byte.
"""
quotient = byte // 4 + ord('0')
remainder = byte % 4
ch = np.array(
[(quotient + remainder), quotient, quotient, quotient],
dtype='int32')
check = True
while check:
check = False
for x in self._EXCLUDE:
for j in [0, 2]:
if ch[j] == x or ch[j + 1] == x:
ch[j] += 1
ch[j + 1] -= 1
check = True
return ch
def _char_encode(self, value):
"""
Encodes the checksum ``value`` using the algorithm described
in SPR section A.7.2 and returns it as a 16 character string.
Parameters
----------
value
a checksum
Returns
-------
ascii encoded checksum
"""
value = np.uint32(value)
asc = np.zeros((16,), dtype='byte')
ascii = np.zeros((16,), dtype='byte')
for i in range(4):
byte = (value & self._MASK[i]) >> ((3 - i) * 8)
ch = self._encode_byte(byte)
for j in range(4):
asc[4 * j + i] = ch[j]
for i in range(16):
ascii[i] = asc[(i + 15) % 16]
return decode_ascii(ascii.tostring())
class ExtensionHDU(_ValidHDU):
"""
An extension HDU class.
This class is the base class for the `TableHDU`, `ImageHDU`, and
`BinTableHDU` classes.
"""
_extension = ''
@classmethod
def match_header(cls, header):
"""
This class should never be instantiated directly. Either a standard
extension HDU type should be used for a specific extension, or
NonstandardExtHDU should be used.
"""
raise NotImplementedError
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def writeto(self, name, output_verify='exception', overwrite=False,
checksum=False):
"""
        Works similarly to the normal writeto(), but prepends a default
        `PrimaryHDU`, as required by extension HDUs (which cannot stand on
        their own).
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
"""
from .hdulist import HDUList
from .image import PrimaryHDU
hdulist = HDUList([PrimaryHDU(), self])
hdulist.writeto(name, output_verify, overwrite=overwrite,
checksum=checksum)
def _verify(self, option='warn'):
errs = super()._verify(option=option)
# Verify location and value of mandatory keywords.
naxis = self._header.get('NAXIS', 0)
self.req_cards('PCOUNT', naxis + 3, lambda v: (_is_int(v) and v >= 0),
0, option, errs)
self.req_cards('GCOUNT', naxis + 4, lambda v: (_is_int(v) and v == 1),
1, option, errs)
return errs
# For backwards compatibility, though this needs to be deprecated
# TODO: Mark this as deprecated
_ExtensionHDU = ExtensionHDU
class NonstandardExtHDU(ExtensionHDU):
"""
A Non-standard Extension HDU class.
This class is used for an Extension HDU when the ``XTENSION``
`Card` has a non-standard value. In this case, Astropy can figure
out how big the data is but not what it is. The data for this HDU
is read from the file as a byte stream that begins at the first
byte after the header ``END`` card and continues until the
beginning of the next header or the end of the file.
"""
_standard = False
@classmethod
def match_header(cls, header):
"""
Matches any extension HDU that is not one of the standard extension HDU
types.
"""
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
# A3DTABLE is not really considered a 'standard' extension, as it was
# sort of the prototype for BINTABLE; however, since our BINTABLE
# implementation handles A3DTABLE HDUs it is listed here.
standard_xtensions = ('IMAGE', 'TABLE', 'BINTABLE', 'A3DTABLE')
# The check that xtension is not one of the standard types should be
# redundant.
return (card.keyword == 'XTENSION' and
xtension not in standard_xtensions)
def _summary(self):
axes = tuple(self.data.shape)
return (self.name, self.ver, 'NonstandardExtHDU', len(self._header), axes)
@lazyproperty
def data(self):
"""
Return the file data.
"""
return self._get_raw_data(self.size, 'ubyte', self._data_offset)
# TODO: Mark this as deprecated
_NonstandardExtHDU = NonstandardExtHDU
|
01a0a27afda7b03d56995225a65c6d6d07bebf62a95d093bd37c5e9aec55f4fd | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import bz2
import gzip
import itertools
import os
import shutil
import sys
import warnings
import numpy as np
from . import compressed
from .base import _BaseHDU, _ValidHDU, _NonstandardHDU, ExtensionHDU
from .groups import GroupsHDU
from .image import PrimaryHDU, ImageHDU
from ..file import _File
from ..header import _pad_length
from ..util import (_is_int, _tmp_name, fileobj_closed, ignore_sigint,
_get_array_mmap, _free_space_check)
from ..verify import _Verify, _ErrList, VerifyError, VerifyWarning
from ....utils import indent
from ....utils.exceptions import AstropyUserWarning
from ....utils.decorators import deprecated_renamed_argument
def fitsopen(name, mode='readonly', memmap=None, save_backup=False,
cache=True, lazy_load_hdus=None, **kwargs):
"""Factory function to open a FITS file and return an `HDUList` object.
Parameters
----------
name : file path, file object, file-like object or pathlib.Path object
File to be opened.
mode : str, optional
Open mode, 'readonly' (default), 'update', 'append', 'denywrite', or
'ostream'.
If ``name`` is a file object that is already opened, ``mode`` must
        match the mode the file was opened with: readonly (rb), update (rb+),
        append (ab+), ostream (w), or denywrite (rb).
memmap : bool, optional
Is memory mapping to be used?
save_backup : bool, optional
If the file was opened in update or append mode, this ensures that a
backup of the original file is saved before any changes are flushed.
The backup has the same name as the original file with ".bak" appended.
If "file.bak" already exists then "file.bak.1" is used, and so on.
cache : bool, optional
If the file name is a URL, `~astropy.utils.data.download_file` is used
to open the file. This specifies whether or not to save the file
locally in Astropy's download cache (default: `True`).
    lazy_load_hdus : bool, optional
By default `~astropy.io.fits.open` will not read all the HDUs and
headers in a FITS file immediately upon opening. This is an
optimization especially useful for large files, as FITS has no way
of determining the number and offsets of all the HDUs in a file
without scanning through the file and reading all the headers.
To disable lazy loading and read all HDUs immediately (the old
behavior) use ``lazy_load_hdus=False``. This can lead to fewer
surprises--for example with lazy loading enabled, ``len(hdul)``
can be slow, as it means the entire FITS file needs to be read in
order to determine the number of HDUs. ``lazy_load_hdus=False``
ensures that all HDUs have already been loaded after the file has
been opened.
.. versionadded:: 1.3
kwargs : dict, optional
additional optional keyword arguments, possible values are:
- **uint** : bool
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
This is enabled by default so that the pseudo-unsigned
integer convention is assumed.
Note, for backward compatibility, the kwarg **uint16** may
be used instead. The kwarg was renamed when support was
added for integers of any size.
- **ignore_missing_end** : bool
Do not issue an exception when opening a file that is
missing an ``END`` card in the last header.
- **checksum** : bool, str
If `True`, verifies that both ``DATASUM`` and
``CHECKSUM`` card values (when present in the HDU header)
match the header and data of all HDU's in the file. Updates to a
file that already has a checksum will preserve and update the
existing checksums unless this argument is given a value of
'remove', in which case the CHECKSUM and DATASUM values are not
checked, and are removed when saving changes to the file.
- **disable_image_compression** : bool
If `True`, treats compressed image HDU's like normal
binary table HDU's.
- **do_not_scale_image_data** : bool
If `True`, image data is not scaled using BSCALE/BZERO values
when read.
- **character_as_bytes** : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
- **ignore_blank** : bool
If `True`, the BLANK keyword is ignored if present.
- **scale_back** : bool
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data.
Returns
-------
hdulist : an `HDUList` object
`HDUList` containing all of the header data units in the
file.
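    Examples
    --------
    Typical sketches (``observation.fits`` is a placeholder file name)::
        hdulist = fitsopen('observation.fits')
        hdulist = fitsopen('observation.fits', mode='update', memmap=True)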
"""
from .. import conf
if memmap is None:
# distinguish between True (kwarg explicitly set)
# and None (preference for memmap in config, might be ignored)
memmap = None if conf.use_memmap else False
else:
memmap = bool(memmap)
if lazy_load_hdus is None:
lazy_load_hdus = conf.lazy_load_hdus
else:
lazy_load_hdus = bool(lazy_load_hdus)
if 'uint' not in kwargs:
kwargs['uint'] = conf.enable_uint
if not name:
raise ValueError('Empty filename: {!r}'.format(name))
return HDUList.fromfile(name, mode, memmap, save_backup, cache,
lazy_load_hdus, **kwargs)
class HDUList(list, _Verify):
"""
HDU list class. This is the top-level FITS object. When a FITS
file is opened, a `HDUList` object is returned.
"""
def __init__(self, hdus=[], file=None):
"""
Construct a `HDUList` object.
Parameters
----------
hdus : sequence of HDU objects or single HDU, optional
The HDU object(s) to comprise the `HDUList`. Should be
instances of HDU classes like `ImageHDU` or `BinTableHDU`.
file : file object, bytes, optional
The opened physical file associated with the `HDUList`
or a bytes object containing the contents of the FITS
file.
"""
if isinstance(file, bytes):
self._data = file
self._file = None
else:
self._file = file
self._data = None
self._save_backup = False
# For internal use only--the keyword args passed to fitsopen /
# HDUList.fromfile/string when opening the file
self._open_kwargs = {}
self._in_read_next_hdu = False
# If we have read all the HDUs from the file or not
        # This assumes that all HDUs have been written when we first opened the
# file; we do not currently support loading additional HDUs from a file
# while it is being streamed to. In the future that might be supported
# but for now this is only used for the purpose of lazy-loading of
# existing HDUs.
if file is None:
self._read_all = True
elif self._file is not None:
# Should never attempt to read HDUs in ostream mode
self._read_all = self._file.mode == 'ostream'
else:
self._read_all = False
if hdus is None:
hdus = []
# can take one HDU, as well as a list of HDU's as input
if isinstance(hdus, _ValidHDU):
hdus = [hdus]
elif not isinstance(hdus, (HDUList, list)):
raise TypeError("Invalid input for HDUList.")
for idx, hdu in enumerate(hdus):
if not isinstance(hdu, _BaseHDU):
raise TypeError("Element {} in the HDUList input is "
"not an HDU.".format(idx))
super().__init__(hdus)
if file is None:
# Only do this when initializing from an existing list of HDUs
        # When initializing from a file, this will be handled by the
# append method after the first HDU is read
self.update_extend()
def __len__(self):
if not self._in_read_next_hdu:
self.readall()
return super().__len__()
def __repr__(self):
# In order to correctly repr an HDUList we need to load all the
# HDUs as well
self.readall()
return super().__repr__()
def __iter__(self):
# While effectively this does the same as:
# for idx in range(len(self)):
# yield self[idx]
# the more complicated structure is here to prevent the use of len(),
# which would break the lazy loading
for idx in itertools.count():
try:
yield self[idx]
except IndexError:
break
def __getitem__(self, key):
"""
Get an HDU from the `HDUList`, indexed by number or name.
"""
# If the key is a slice we need to make sure the necessary HDUs
# have been loaded before passing the slice on to super.
if isinstance(key, slice):
max_idx = key.stop
# Check for and handle the case when no maximum was
# specified (e.g. [1:]).
if max_idx is None:
# We need all of the HDUs, so load them
# and reset the maximum to the actual length.
max_idx = len(self)
# Just in case the max_idx is negative...
max_idx = self._positive_index_of(max_idx)
number_loaded = super().__len__()
if max_idx >= number_loaded:
# We need more than we have, try loading up to and including
# max_idx. Note we do not try to be clever about skipping HDUs
# even though key.step might conceivably allow it.
for i in range(number_loaded, max_idx):
# Read until max_idx or to the end of the file, whichever
# comes first.
if not self._read_next_hdu():
break
try:
hdus = super().__getitem__(key)
except IndexError as e:
# Raise a more helpful IndexError if the file was not fully read.
if self._read_all:
raise e
else:
raise IndexError('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
else:
return HDUList(hdus)
# Originally this used recursion, but hypothetically an HDU with
# a very large number of HDUs could blow the stack, so use a loop
# instead
try:
return self._try_while_unread_hdus(super().__getitem__,
self._positive_index_of(key))
except IndexError as e:
# Raise a more helpful IndexError if the file was not fully read.
if self._read_all:
raise e
else:
raise IndexError('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
def __contains__(self, item):
"""
Returns `True` if ``HDUList.index_of(item)`` succeeds.
"""
try:
self._try_while_unread_hdus(self.index_of, item)
except KeyError:
return False
return True
def __setitem__(self, key, hdu):
"""
Set an HDU to the `HDUList`, indexed by number or name.
"""
_key = self._positive_index_of(key)
if isinstance(hdu, (slice, list)):
if _is_int(_key):
raise ValueError('An element in the HDUList must be an HDU.')
for item in hdu:
if not isinstance(item, _BaseHDU):
raise ValueError('{} is not an HDU.'.format(item))
else:
if not isinstance(hdu, _BaseHDU):
raise ValueError('{} is not an HDU.'.format(hdu))
try:
self._try_while_unread_hdus(super().__setitem__, _key, hdu)
except IndexError:
raise IndexError('Extension {} is out of bound or not found.'
.format(key))
self._resize = True
self._truncate = False
def __delitem__(self, key):
"""
Delete an HDU from the `HDUList`, indexed by number or name.
"""
if isinstance(key, slice):
end_index = len(self)
else:
key = self._positive_index_of(key)
end_index = len(self) - 1
self._try_while_unread_hdus(super().__delitem__, key)
if (key == end_index or key == -1 and not self._resize):
self._truncate = True
else:
self._truncate = False
self._resize = True
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
@classmethod
def fromfile(cls, fileobj, mode=None, memmap=None,
save_backup=False, cache=True, lazy_load_hdus=True,
**kwargs):
"""
Creates an `HDUList` instance from a file-like object.
        This is the actual implementation of ``fitsopen()`` and generally
        shouldn't be used directly. Use :func:`open` instead (and see its
documentation for details of the parameters accepted by this method).
"""
return cls._readfrom(fileobj=fileobj, mode=mode, memmap=memmap,
save_backup=save_backup, cache=cache,
lazy_load_hdus=lazy_load_hdus, **kwargs)
@classmethod
def fromstring(cls, data, **kwargs):
"""
Creates an `HDUList` instance from a string or other in-memory data
buffer containing an entire FITS file. Similar to
:meth:`HDUList.fromfile`, but does not accept the mode or memmap
arguments, as they are only relevant to reading from a file on disk.
This is useful for interfacing with other libraries such as CFITSIO,
and may also be useful for streaming applications.
Parameters
----------
data : str, buffer, memoryview, etc.
A string or other memory buffer containing an entire FITS file. It
should be noted that if that memory is read-only (such as a Python
string) the returned :class:`HDUList`'s data portions will also be
read-only.
kwargs : dict
Optional keyword arguments. See
:func:`astropy.io.fits.open` for details.
Returns
-------
hdul : HDUList
An :class:`HDUList` object representing the in-memory FITS file.
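        Examples
        --------
        A sketch, assuming ``example.fits`` names an existing FITS file::
            with open('example.fits', 'rb') as f:  # placeholder file name
                buf = f.read()
            hdul = HDUList.fromstring(buf)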
"""
try:
# Test that the given object supports the buffer interface by
# ensuring an ndarray can be created from it
np.ndarray((), dtype='ubyte', buffer=data)
except TypeError:
raise TypeError(
'The provided object {} does not contain an underlying '
'memory buffer. fromstring() requires an object that '
'supports the buffer interface such as bytes, buffer, '
'memoryview, ndarray, etc. This restriction is to ensure '
'that efficient access to the array/table data is possible.'
''.format(data))
return cls._readfrom(data=data, **kwargs)
def fileinfo(self, index):
"""
Returns a dictionary detailing information about the locations
of the indexed HDU within any associated file. The values are
only valid after a read or write of the associated file with
no intervening changes to the `HDUList`.
Parameters
----------
index : int
Index of HDU for which info is to be returned.
Returns
-------
fileinfo : dict or None
The dictionary details information about the locations of
the indexed HDU within an associated file. Returns `None`
when the HDU is not associated with a file.
Dictionary contents:
        ========== ========================================================
        Key        Value
        ========== ========================================================
        file       File object associated with the HDU
        filename   Name of associated file object
        filemode   Mode in which the file was opened (readonly,
                   update, append, denywrite, ostream)
        resized    Flag that when `True` indicates that the data has been
                   resized since the last read/write so the returned values
                   may not be valid.
        hdrLoc     Starting byte location of header in file
        datLoc     Starting byte location of data block in file
        datSpan    Data size including padding
        ========== ========================================================
"""
if self._file is not None:
output = self[index].fileinfo()
if not output:
# OK, the HDU associated with this index is not yet
# tied to the file associated with the HDUList. The only way
# to get the file object is to check each of the HDU's in the
# list until we find the one associated with the file.
f = None
for hdu in self:
info = hdu.fileinfo()
if info:
f = info['file']
fm = info['filemode']
break
output = {'file': f, 'filemode': fm, 'hdrLoc': None,
'datLoc': None, 'datSpan': None}
output['filename'] = self._file.name
output['resized'] = self._wasresized()
else:
output = None
return output
def __copy__(self):
"""
Return a shallow copy of an HDUList.
Returns
-------
copy : `HDUList`
A shallow copy of this `HDUList` object.
"""
return self[:]
# Syntactic sugar for `__copy__()` magic method
copy = __copy__
def __deepcopy__(self, memo=None):
return HDUList([hdu.copy() for hdu in self])
def pop(self, index=-1):
""" Remove an item from the list and return it.
Parameters
----------
index : int, str, tuple of (string, int), optional
An integer value of ``index`` indicates the position from which
``pop()`` removes and returns an HDU. A string value or a tuple
of ``(string, int)`` functions as a key for identifying the
HDU to be removed and returned. If ``key`` is a tuple, it is
of the form ``(key, ver)`` where ``ver`` is an ``EXTVER``
value that must match the HDU being searched for.
If the key is ambiguous (e.g. there are multiple 'SCI' extensions)
the first match is returned. For a more precise match use the
``(name, ver)`` pair.
If even the ``(name, ver)`` pair is ambiguous the numeric index
must be used to index the duplicate HDU.
Returns
-------
hdu : HDU object
The HDU object at position indicated by ``index`` or having name
and version specified by ``index``.
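        Examples
        --------
        For instance (extension names are illustrative)::
            hdu = hdulist.pop()            # remove and return the last HDU
            hdu = hdulist.pop(0)           # by positional index
            hdu = hdulist.pop('SCI')       # by EXTNAME
            hdu = hdulist.pop(('SCI', 2))  # by (EXTNAME, EXTVER)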
"""
# Make sure that HDUs are loaded before attempting to pop
self.readall()
list_index = self.index_of(index)
return super(HDUList, self).pop(list_index)
def insert(self, index, hdu):
"""
Insert an HDU into the `HDUList` at the given ``index``.
Parameters
----------
index : int
Index before which to insert the new HDU.
hdu : HDU object
The HDU object to insert
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError('{} is not an HDU.'.format(hdu))
num_hdus = len(self)
if index == 0 or num_hdus == 0:
if num_hdus != 0:
# We are inserting a new Primary HDU so we need to
# make the current Primary HDU into an extension HDU.
if isinstance(self[0], GroupsHDU):
raise ValueError(
"The current Primary HDU is a GroupsHDU. "
"It can't be made into an extension HDU, "
"so another HDU cannot be inserted before it.")
hdu1 = ImageHDU(self[0].data, self[0].header)
# Insert it into position 1, then delete HDU at position 0.
super().insert(1, hdu1)
super().__delitem__(0)
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super().insert(0, phdu)
index = 1
else:
if isinstance(hdu, GroupsHDU):
raise ValueError('A GroupsHDU must be inserted as a '
'Primary HDU.')
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
hdu = ImageHDU(hdu.data, hdu.header)
super().insert(index, hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def append(self, hdu):
"""
Append a new HDU to the `HDUList`.
Parameters
----------
hdu : HDU object
HDU to add to the `HDUList`.
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError('HDUList can only append an HDU.')
if len(self) > 0:
if isinstance(hdu, GroupsHDU):
raise ValueError(
"Can't append a GroupsHDU to a non-empty HDUList")
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
# TODO: This isn't necessarily sufficient to copy the HDU;
# _header_offset and friends need to be copied too.
hdu = ImageHDU(hdu.data, hdu.header)
else:
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary
# HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super().append(phdu)
super().append(hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def index_of(self, key):
"""
Get the index of an HDU from the `HDUList`.
Parameters
----------
key : int, str or tuple of (string, int)
The key identifying the HDU. If ``key`` is a tuple, it is of the
form ``(key, ver)`` where ``ver`` is an ``EXTVER`` value that must
match the HDU being searched for.
If the key is ambiguous (e.g. there are multiple 'SCI' extensions)
the first match is returned. For a more precise match use the
``(name, ver)`` pair.
If even the ``(name, ver)`` pair is ambiguous (it shouldn't be
but it's not impossible) the numeric index must be used to index
the duplicate HDU.
Returns
-------
index : int
The index of the HDU in the `HDUList`.
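        Examples
        --------
        For example (extension names are illustrative)::
            idx = hdulist.index_of('SCI')       # first 'SCI' extension
            idx = hdulist.index_of(('SCI', 2))  # 'SCI' with EXTVER == 2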
"""
if _is_int(key):
return key
elif isinstance(key, tuple):
_key, _ver = key
else:
_key = key
_ver = None
if not isinstance(_key, str):
raise KeyError(
'{} indices must be integers, extension names as strings, '
'or (extname, version) tuples; got {}'
''.format(self.__class__.__name__, _key))
_key = (_key.strip()).upper()
found = None
for idx, hdu in enumerate(self):
name = hdu.name
if isinstance(name, str):
name = name.strip().upper()
# 'PRIMARY' should always work as a reference to the first HDU
if ((name == _key or (_key == 'PRIMARY' and idx == 0)) and
(_ver is None or _ver == hdu.ver)):
found = idx
break
if (found is None):
raise KeyError('Extension {!r} not found.'.format(key))
else:
return found
def _positive_index_of(self, key):
"""
Same as index_of, but ensures always returning a positive index
or zero.
(Really this should be called non_negative_index_of but it felt
too long.)
This means that if the key is a negative integer, we have to
convert it to the corresponding positive index. This means
knowing the length of the HDUList, which in turn means loading
all HDUs. Therefore using negative indices on HDULists is inherently
inefficient.
"""
index = self.index_of(key)
if index >= 0:
return index
if abs(index) > len(self):
raise IndexError(
'Extension {} is out of bound or not found.'.format(index))
return len(self) + index
def readall(self):
"""
Read data of all HDUs into memory.
"""
while self._read_next_hdu():
pass
@ignore_sigint
def flush(self, output_verify='fix', verbose=False):
"""
Force a write of the `HDUList` back to the file (for append and
update modes only).
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
verbose : bool
When `True`, print verbose messages
"""
if self._file.mode not in ('append', 'update', 'ostream'):
warnings.warn("Flush for '{}' mode is not supported."
.format(self._file.mode), AstropyUserWarning)
return
if self._save_backup and self._file.mode in ('append', 'update'):
filename = self._file.name
if os.path.exists(filename):
                # If the file doesn't actually exist anymore for some reason
# then there's no point in trying to make a backup
backup = filename + '.bak'
idx = 1
while os.path.exists(backup):
backup = filename + '.bak.' + str(idx)
idx += 1
warnings.warn('Saving a backup of {} to {}.'.format(
filename, backup), AstropyUserWarning)
try:
shutil.copy(filename, backup)
except OSError as exc:
raise OSError('Failed to save backup to destination {}: '
'{}'.format(filename, exc))
self.verify(option=output_verify)
if self._file.mode in ('append', 'ostream'):
for hdu in self:
if verbose:
try:
extver = str(hdu._header['extver'])
except KeyError:
extver = ''
# only append HDU's which are "new"
if hdu._new:
hdu._prewriteto(checksum=hdu._output_checksum)
with _free_space_check(self):
hdu._writeto(self._file)
if verbose:
print('append HDU', hdu.name, extver)
hdu._new = False
hdu._postwriteto()
elif self._file.mode == 'update':
self._flush_update()
def update_extend(self):
"""
Make sure that if the primary header needs the keyword ``EXTEND`` that
it has it and it is correct.
"""
if not len(self):
return
if not isinstance(self[0], PrimaryHDU):
# A PrimaryHDU will be automatically inserted at some point, but it
# might not have been added yet
return
hdr = self[0].header
def get_first_ext():
try:
return self[1]
except IndexError:
return None
if 'EXTEND' in hdr:
if not hdr['EXTEND'] and get_first_ext() is not None:
hdr['EXTEND'] = True
elif get_first_ext() is not None:
if hdr['NAXIS'] == 0:
hdr.set('EXTEND', True, after='NAXIS')
else:
n = hdr['NAXIS']
hdr.set('EXTEND', True, after='NAXIS' + str(n))
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def writeto(self, fileobj, output_verify='exception', overwrite=False,
checksum=False):
"""
Write the `HDUList` to a new file.
Parameters
----------
fileobj : file path, file object or file-like object
File to write to. If a file object, must be opened in a
writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the headers of all HDU's written to the file.
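        Examples
        --------
        A sketch (``output.fits`` is a placeholder name)::
            hdulist.writeto('output.fits', overwrite=True, checksum=True)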
"""
if (len(self) == 0):
warnings.warn("There is nothing to write.", AstropyUserWarning)
return
self.verify(option=output_verify)
# make sure the EXTEND keyword is there if there is extension
self.update_extend()
# make note of whether the input file object is already open, in which
# case we should not close it after writing (that should be the job
# of the caller)
closed = isinstance(fileobj, str) or fileobj_closed(fileobj)
# writeto is only for writing a new file from scratch, so the most
# sensible mode to require is 'ostream'. This can accept an open
# file object that's open to write only, or in append/update modes
# but only if the file doesn't exist.
fileobj = _File(fileobj, mode='ostream', overwrite=overwrite)
hdulist = self.fromfile(fileobj)
try:
dirname = os.path.dirname(hdulist._file.name)
except AttributeError:
dirname = None
with _free_space_check(self, dirname=dirname):
for hdu in self:
hdu._prewriteto(checksum=checksum)
hdu._writeto(hdulist._file)
hdu._postwriteto()
hdulist.close(output_verify=output_verify, closed=closed)
def close(self, output_verify='exception', verbose=False, closed=True):
"""
Close the associated FITS file and memmap object, if any.
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
            ``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
verbose : bool
When `True`, print out verbose messages.
closed : bool
When `True`, close the underlying file object.
"""
try:
if (self._file and self._file.mode in ('append', 'update')
and not self._file.closed):
self.flush(output_verify=output_verify, verbose=verbose)
finally:
if self._file and closed and hasattr(self._file, 'close'):
self._file.close()
# Give individual HDUs an opportunity to do on-close cleanup
for hdu in self:
hdu._close(closed=closed)
def info(self, output=None):
"""
Summarize the info of the HDUs in this `HDUList`.
Note that this function prints its results to the console---it
does not return a value.
Parameters
----------
output : file, bool, optional
A file-like object to write the output to. If `False`, does not
output to a file and instead returns a list of tuples representing
the HDU info. Writes to ``sys.stdout`` by default.
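        Examples
        --------
        Print a summary to stdout, or collect the rows instead::
            hdulist.info()                     # prints to sys.stdout
            rows = hdulist.info(output=False)  # list of summary tuples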
"""
if output is None:
output = sys.stdout
if self._file is None:
name = '(No file associated with this HDUList)'
else:
name = self._file.name
results = ['Filename: {}'.format(name),
'No. Name Ver Type Cards Dimensions Format']
format = '{:3d} {:10} {:3} {:11} {:5d} {} {} {}'
default = ('', '', '', 0, (), '', '')
for idx, hdu in enumerate(self):
summary = hdu._summary()
if len(summary) < len(default):
summary += default[len(summary):]
summary = (idx,) + summary
if output:
results.append(format.format(*summary))
else:
results.append(summary)
if output:
output.write('\n'.join(results))
output.write('\n')
output.flush()
else:
return results[2:]
def filename(self):
"""
Return the file name associated with the HDUList object if one exists.
Otherwise returns None.
Returns
-------
        filename : str or None
            The file name associated with the HDUList object if an
            association exists; otherwise `None`.
"""
if self._file is not None:
if hasattr(self._file, 'name'):
return self._file.name
return None
@classmethod
def _readfrom(cls, fileobj=None, data=None, mode=None,
memmap=None, save_backup=False, cache=True,
lazy_load_hdus=True, **kwargs):
"""
Provides the implementations from HDUList.fromfile and
HDUList.fromstring, both of which wrap this method, as their
implementations are largely the same.
"""
if fileobj is not None:
if not isinstance(fileobj, _File):
# instantiate a FITS file object (ffo)
fileobj = _File(fileobj, mode=mode, memmap=memmap, cache=cache)
# The Astropy mode is determined by the _File initializer if the
# supplied mode was None
mode = fileobj.mode
hdulist = cls(file=fileobj)
else:
if mode is None:
# The default mode
mode = 'readonly'
hdulist = cls(file=data)
# This method is currently only called from HDUList.fromstring and
# HDUList.fromfile. If fileobj is None then this must be the
# fromstring case; the data type of ``data`` will be checked in the
# _BaseHDU.fromstring call.
hdulist._save_backup = save_backup
hdulist._open_kwargs = kwargs
if fileobj is not None and fileobj.writeonly:
# Output stream--not interested in reading/parsing
# the HDUs--just writing to the output file
return hdulist
# Make sure at least the PRIMARY HDU can be read
read_one = hdulist._read_next_hdu()
# If we're trying to read only and no header units were found,
# raise an exception
if not read_one and mode in ('readonly', 'denywrite'):
# Close the file if necessary (issue #6168)
if hdulist._file.close_on_error:
hdulist._file.close()
raise OSError('Empty or corrupt FITS file')
if not lazy_load_hdus:
# Go ahead and load all HDUs
while hdulist._read_next_hdu():
pass
# initialize/reset attributes to be used in "update/append" mode
hdulist._resize = False
hdulist._truncate = False
return hdulist
def _try_while_unread_hdus(self, func, *args, **kwargs):
"""
Attempt an operation that accesses an HDU by index/name
that can fail if not all HDUs have been read yet. Keep
reading HDUs until the operation succeeds or there are no
more HDUs to read.
"""
while True:
try:
return func(*args, **kwargs)
except Exception:
if self._read_next_hdu():
continue
else:
raise
def _read_next_hdu(self):
"""
Lazily load a single HDU from the fileobj or data string the `HDUList`
was opened from, unless no further HDUs are found.
Returns True if a new HDU was loaded, or False otherwise.
"""
if self._read_all:
return False
saved_compression_enabled = compressed.COMPRESSION_ENABLED
fileobj, data, kwargs = self._file, self._data, self._open_kwargs
if fileobj is not None and fileobj.closed:
return False
try:
self._in_read_next_hdu = True
if ('disable_image_compression' in kwargs and
kwargs['disable_image_compression']):
compressed.COMPRESSION_ENABLED = False
# read all HDUs
try:
if fileobj is not None:
try:
# Make sure we're back to the end of the last read
# HDU
if len(self) > 0:
last = self[len(self) - 1]
if last._data_offset is not None:
offset = last._data_offset + last._data_size
fileobj.seek(offset, os.SEEK_SET)
hdu = _BaseHDU.readfrom(fileobj, **kwargs)
except EOFError:
self._read_all = True
return False
except OSError:
# Close the file: see
# https://github.com/astropy/astropy/issues/6168
#
if self._file.close_on_error:
self._file.close()
if fileobj.writeonly:
self._read_all = True
return False
else:
raise
else:
if not data:
self._read_all = True
return False
hdu = _BaseHDU.fromstring(data, **kwargs)
self._data = data[hdu._data_offset + hdu._data_size:]
super().append(hdu)
if len(self) == 1:
# Check for an extension HDU and update the EXTEND
# keyword of the primary HDU accordingly
self.update_extend()
hdu._new = False
if 'checksum' in kwargs:
hdu._output_checksum = kwargs['checksum']
# check in the case there is extra space after the last HDU or
# corrupted HDU
except (VerifyError, ValueError) as exc:
warnings.warn(
'Error validating header for HDU #{} (note: Astropy '
'uses zero-based indexing).\n{}\n'
'There may be extra bytes after the last HDU or the '
'file is corrupted.'.format(
len(self), indent(str(exc))), VerifyWarning)
del exc
self._read_all = True
return False
finally:
compressed.COMPRESSION_ENABLED = saved_compression_enabled
self._in_read_next_hdu = False
return True
def _verify(self, option='warn'):
errs = _ErrList([], unit='HDU')
# the first (0th) element must be a primary HDU
if len(self) > 0 and (not isinstance(self[0], PrimaryHDU)) and \
(not isinstance(self[0], _NonstandardHDU)):
err_text = "HDUList's 0th element is not a primary HDU."
fix_text = 'Fixed by inserting one as 0th HDU.'
def fix(self=self):
self.insert(0, PrimaryHDU())
err = self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix)
errs.append(err)
if len(self) > 1 and ('EXTEND' not in self[0].header or
self[0].header['EXTEND'] is not True):
err_text = ('Primary HDU does not contain an EXTEND keyword '
'equal to T even though there are extension HDUs.')
fix_text = 'Fixed by inserting or updating the EXTEND keyword.'
def fix(header=self[0].header):
naxis = header['NAXIS']
if naxis == 0:
after = 'NAXIS'
else:
after = 'NAXIS' + str(naxis)
header.set('EXTEND', value=True, after=after)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
# each element calls their own verify
for idx, hdu in enumerate(self):
if idx > 0 and (not isinstance(hdu, ExtensionHDU)):
err_text = ("HDUList's element {} is not an "
"extension HDU.".format(str(idx)))
err = self.run_option(option, err_text=err_text, fixable=False)
errs.append(err)
else:
result = hdu._verify(option)
if result:
errs.append(result)
return errs
def _flush_update(self):
"""Implements flushing changes to a file in update mode."""
for hdu in self:
        # Need to call _prewriteto() for each HDU first to determine if
# resizing will be necessary
hdu._prewriteto(checksum=hdu._output_checksum, inplace=True)
try:
self._wasresized()
# if the HDUList is resized, need to write out the entire contents of
# the hdulist to the file.
if self._resize or self._file.compression:
self._flush_resize()
else:
# if not resized, update in place
for hdu in self:
hdu._writeto(self._file, inplace=True)
# reset the modification attributes after updating
for hdu in self:
hdu._header._modified = False
finally:
for hdu in self:
hdu._postwriteto()
def _flush_resize(self):
"""
Implements flushing changes in update mode when parts of one or more HDU
need to be resized.
"""
old_name = self._file.name
old_memmap = self._file.memmap
name = _tmp_name(old_name)
if not self._file.file_like:
old_mode = os.stat(old_name).st_mode
# The underlying file is an actual file object. The HDUList is
# resized, so we need to write it to a tmp file, delete the
# original file, and rename the tmp file to the original file.
if self._file.compression == 'gzip':
new_file = gzip.GzipFile(name, mode='ab+')
elif self._file.compression == 'bzip2':
new_file = bz2.BZ2File(name, mode='w')
else:
new_file = name
with self.fromfile(new_file, mode='append') as hdulist:
for hdu in self:
hdu._writeto(hdulist._file, inplace=True, copy=True)
if sys.platform.startswith('win'):
                    # Collect a list of open mmaps to the data; this will be
# used later. See below.
mmaps = [(idx, _get_array_mmap(hdu.data), hdu.data)
for idx, hdu in enumerate(self) if hdu._has_data]
hdulist._file.close()
self._file.close()
if sys.platform.startswith('win'):
# Close all open mmaps to the data. This is only necessary on
# Windows, which will not allow a file to be renamed or deleted
# until all handles to that file have been closed.
for idx, mmap, arr in mmaps:
if mmap is not None:
mmap.close()
os.remove(self._file.name)
# reopen the renamed new file with "update" mode
os.rename(name, old_name)
os.chmod(old_name, old_mode)
if isinstance(new_file, gzip.GzipFile):
old_file = gzip.GzipFile(old_name, mode='rb+')
else:
old_file = old_name
ffo = _File(old_file, mode='update', memmap=old_memmap)
self._file = ffo
for hdu in self:
# Need to update the _file attribute and close any open mmaps
# on each HDU
if hdu._has_data and _get_array_mmap(hdu.data) is not None:
del hdu.data
hdu._file = ffo
if sys.platform.startswith('win'):
# On Windows, all the original data mmaps were closed above.
# However, it's possible that the user still has references to
# the old data which would no longer work (possibly even cause
# a segfault if they try to access it). This replaces the
# buffers used by the original arrays with the buffers of mmap
# arrays created from the new file. This seems to work, but
# it's a flaming hack and carries no guarantees that it won't
# lead to odd behavior in practice. Better to just not keep
# references to data from files that had to be resized upon
# flushing (on Windows--again, this is no problem on Linux).
for idx, mmap, arr in mmaps:
if mmap is not None:
arr.data = self[idx].data.data
del mmaps # Just to be sure
else:
# The underlying file is not a file object, it is a file like
# object. We can't write out to a file, we must update the file
# like object in place. To do this, we write out to a temporary
# file, then delete the contents in our file like object, then
# write the contents of the temporary file to the now empty file
# like object.
self.writeto(name)
hdulist = self.fromfile(name)
ffo = self._file
ffo.truncate(0)
ffo.seek(0)
for hdu in hdulist:
hdu._writeto(ffo, inplace=True, copy=True)
# Close the temporary file and delete it.
hdulist.close()
os.remove(hdulist._file.name)
# reset the resize attributes after updating
self._resize = False
self._truncate = False
for hdu in self:
hdu._header._modified = False
hdu._new = False
hdu._file = ffo
def _wasresized(self, verbose=False):
"""
Determine if any changes to the HDUList will require a file resize
when flushing the file.
Side effect of setting the objects _resize attribute.
"""
if not self._resize:
# determine if any of the HDU is resized
for hdu in self:
# Header:
nbytes = len(str(hdu._header))
if nbytes != (hdu._data_offset - hdu._header_offset):
self._resize = True
self._truncate = False
if verbose:
                        print('One or more headers are resized.')
break
# Data:
if not hdu._has_data:
continue
nbytes = hdu.size
nbytes = nbytes + _pad_length(nbytes)
if nbytes != hdu._data_size:
self._resize = True
self._truncate = False
if verbose:
                        print('One or more data areas are resized.')
break
if self._truncate:
try:
self._file.truncate(hdu._data_offset + hdu._data_size)
except OSError:
self._resize = True
self._truncate = False
return self._resize
|
625ecb626dea12977af4250dafb23d0af4c27521cff35af56959c3ee8bb777f3 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import copy
import gc
import pickle
import re
import pytest
import numpy as np
from numpy import char as chararray
try:
import objgraph
HAVE_OBJGRAPH = True
except ImportError:
HAVE_OBJGRAPH = False
from ....io import fits
from ....tests.helper import catch_warnings, ignore_warnings
from ....utils.compat import NUMPY_LT_1_14_1, NUMPY_LT_1_14_2
from ....utils.exceptions import AstropyDeprecationWarning
from ..column import Delayed, NUMPY2FITS
from ..util import decode_ascii
from ..verify import VerifyError
from . import FitsTestCase
def comparefloats(a, b):
"""
    Compare two float scalars or arrays and see if they are consistent.
    Consistency is determined by ensuring the difference is less than the
    expected amount. Return True if consistent, False if any differences.
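    For example::
        >>> comparefloats(np.array([1.0]), np.array([1.0 + 1e-9]))
        True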
"""
aa = a
bb = b
# compute expected precision
if aa.dtype.name == 'float32' or bb.dtype.name == 'float32':
precision = 0.000001
else:
precision = 0.0000000000000001
precision = 0.00001 # until precision problem is fixed in astropy.io.fits
diff = np.absolute(aa - bb)
mask0 = aa == 0
masknz = aa != 0.
if np.any(mask0):
if diff[mask0].max() != 0.:
return False
if np.any(masknz):
if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:
return False
return True
def comparerecords(a, b):
"""
    Compare two record arrays.
    Does this field by field, using approximation testing for float columns
    (complex values are not yet handled).
    Column names are not compared, but column types and sizes are.
"""
nfieldsa = len(a.dtype.names)
nfieldsb = len(b.dtype.names)
if nfieldsa != nfieldsb:
print("number of fields don't match")
return False
for i in range(nfieldsa):
fielda = a.field(i)
fieldb = b.field(i)
if fielda.dtype.char == 'S':
fielda = decode_ascii(fielda)
if fieldb.dtype.char == 'S':
fieldb = decode_ascii(fieldb)
if (not isinstance(fielda, type(fieldb)) and not
isinstance(fieldb, type(fielda))):
print("type(fielda): ", type(fielda), " fielda: ", fielda)
print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb)
print('field {0} type differs'.format(i))
return False
if len(fielda) and isinstance(fielda[0], np.floating):
if not comparefloats(fielda, fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print('field {0} differs'.format(i))
return False
elif (isinstance(fielda, fits.column._VLF) or
isinstance(fieldb, fits.column._VLF)):
for row in range(len(fielda)):
if np.any(fielda[row] != fieldb[row]):
print('fielda[{0}]: {1}'.format(row, fielda[row]))
print('fieldb[{0}]: {1}'.format(row, fieldb[row]))
print('field {0} differs in row {1}'.format(i, row))
return False
else:
if np.any(fielda != fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print('field {0} differs'.format(i))
return False
return True
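# A minimal usage sketch of comparerecords with two illustrative record
# arrays (not part of the test data).
def _comparerecords_example():
    ra = np.rec.array([(1, 2.0), (3, 4.0)], names='a, b')
    rb = np.rec.array([(1, 2.0), (3, 4.0)], names='a, b')
    return comparerecords(ra, rb)  # True: same field types, sizes, and values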
class TestTableFunctions(FitsTestCase):
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize a
new HDU.
This is like the test of the same name in test_image, but tests this
for tables as well.
"""
ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()])
thdr = ifd[1].header
thdr['FILENAME'] = 'labq01i3q_rawtag.fits'
thdu = fits.BinTableHDU(header=thdr)
ofd = fits.HDUList(thdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert thdr['FILENAME'] == 'labq01i3q_rawtag.fits'
def test_open(self):
# open some existing FITS files:
tt = fits.open(self.data('tb.fits'))
fd = fits.open(self.data('test0.fits'))
# create some local arrays
a1 = chararray.array(['abc', 'def', 'xx'])
r1 = np.array([11., 12., 13.], dtype=np.float32)
# create a table from scratch, using a mixture of columns from existing
# tables and locally created arrays:
# first, create individual column definitions
c1 = fits.Column(name='abc', format='3A', array=a1)
c2 = fits.Column(name='def', format='E', array=r1)
a3 = np.array([3, 4, 5], dtype='i2')
c3 = fits.Column(name='xyz', format='I', array=a3)
a4 = np.array([1, 2, 3], dtype='i2')
c4 = fits.Column(name='t1', format='I', array=a4)
a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype='c8')
c5 = fits.Column(name='t2', format='C', array=a5)
# Note that the X format requires a two-D array
a6 = np.array([[0], [1], [0]], dtype=np.uint8)
c6 = fits.Column(name='t3', format='X', array=a6)
a7 = np.array([101, 102, 103], dtype='i4')
c7 = fits.Column(name='t4', format='J', array=a7)
a8 = np.array([[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1]], dtype=np.uint8)
c8 = fits.Column(name='t5', format='11X', array=a8)
# second, create a column-definitions object for all columns in a table
x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8])
tbhdu = fits.BinTableHDU.from_columns(x)
# another way to create a table is by using existing table's
# information:
x2 = fits.ColDefs(tt[1])
t2 = fits.BinTableHDU.from_columns(x2, nrows=2)
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')
assert comparerecords(t2.data, ra)
# the table HDU's data is a subclass of a record array, so we can
# access one row like this:
assert tbhdu.data[1][0] == a1[1]
assert tbhdu.data[1][1] == r1[1]
assert tbhdu.data[1][2] == a3[1]
assert tbhdu.data[1][3] == a4[1]
assert tbhdu.data[1][4] == a5[1]
assert (tbhdu.data[1][5] == a6[1].view('bool')).all()
assert tbhdu.data[1][6] == a7[1]
assert (tbhdu.data[1][7] == a8[1]).all()
# and a column like this:
assert str(tbhdu.data.field('abc')) == "['abc' 'def' 'xx']"
# An alternative way to create a column-definitions object is from an
# existing table.
xx = fits.ColDefs(tt[1])
# now we write out the newly created table HDU to a FITS file:
fout = fits.HDUList(fits.PrimaryHDU())
fout.append(tbhdu)
fout.writeto(self.temp('tableout1.fits'), overwrite=True)
with fits.open(self.temp('tableout1.fits')) as f2:
temp = f2[1].data.field(7)
assert (temp[0] == [True, True, False, True, False, True,
True, True, False, False, True]).all()
# An alternative way to create an output table FITS file:
fout2 = fits.open(self.temp('tableout2.fits'), 'append')
fout2.append(fd[0])
fout2.append(tbhdu)
fout2.close()
tt.close()
fd.close()
def test_binary_table(self):
# binary table:
t = fits.open(self.data('tb.fits'))
assert t[1].header['tform1'] == '1J'
info = {'name': ['c1', 'c2', 'c3', 'c4'],
'format': ['1J', '3A', '1E', '1L'],
'unit': ['', '', '', ''],
'null': [-2147483647, '', '', ''],
'bscale': ['', '', 3, ''],
'bzero': ['', '', 0.4, ''],
'disp': ['I11', 'A3', 'G15.7', 'L6'],
'start': ['', '', '', ''],
'dim': ['', '', '', ''],
'coord_inc': ['', '', '', ''],
'coord_type': ['', '', '', ''],
'coord_unit': ['', '', '', ''],
'coord_ref_point': ['', '', '', ''],
'coord_ref_value': ['', '', '', ''],
'time_ref_pos': ['', '', '', '']}
assert t[1].columns.info(output=False) == info
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')
assert comparerecords(t[1].data, ra[:2])
# Change scaled field and scale back to the original array
t[1].data.field('c4')[0] = 1
t[1].data._scale_back()
assert str(np.rec.recarray.field(t[1].data, 'c4')) == '[84 84]'
# look at data column-wise
assert (t[1].data.field(0) == np.array([1, 2])).all()
# When there are scaled columns, the raw data are in data._parent
t.close()
def test_ascii_table(self):
# ASCII table
a = fits.open(self.data('ascii.fits'))
ra1 = np.rec.array([
(10.123000144958496, 37),
(5.1999998092651367, 23),
(15.609999656677246, 17),
(0.0, 0),
(345.0, 345)], names='c1, c2')
assert comparerecords(a[1].data, ra1)
# Test slicing
a2 = a[1].data[2:][2:]
ra2 = np.rec.array([(345.0, 345)], names='c1, c2')
assert comparerecords(a2, ra2)
assert (a2.field(1) == np.array([345])).all()
ra3 = np.rec.array([
(10.123000144958496, 37),
(15.609999656677246, 17),
(345.0, 345)
], names='c1, c2')
assert comparerecords(a[1].data[::2], ra3)
# Test Start Column
a1 = chararray.array(['abcd', 'def'])
r1 = np.array([11., 12.])
c1 = fits.Column(name='abc', format='A3', start=19, array=a1)
c2 = fits.Column(name='def', format='E', start=3, array=r1)
c3 = fits.Column(name='t1', format='I', array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c2, c1, c3])
assert (dict(hdu.data.dtype.fields) ==
{'abc': (np.dtype('|S3'), 18),
'def': (np.dtype('|S15'), 2),
't1': (np.dtype('|S10'), 21)})
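# TBCOLn start values are 1-based byte positions, so start=19 and start=3
# map to the 0-based record offsets 18 and 2 checked above; 't1', which has
# no explicit start, is placed immediately after 'abc' at offset 21.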
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
# Test Scaling
r1 = np.array([11., 12.])
c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3,
bzero=0.6)
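# With bscale=2.3 and bzero=0.6 the physical value 12.0 is stored as
# (12.0 - 0.6) / 2.3 = 4.95652173913..., which is what the assertion on
# the written ASCII table below looks for.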
hdu = fits.TableHDU.from_columns([c2])
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with open(self.temp('toto.fits')) as f:
assert '4.95652173913043548D+00' in f.read()
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
a.close()
def test_endianness(self):
x = np.ndarray((1,), dtype=object)
channelsIn = np.array([3], dtype='uint8')
x[0] = channelsIn
col = fits.Column(name="Channels", format="PB()", array=x)
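# 'PB()' declares a variable-length array column: the FITS 'P' descriptor
# with an unsigned-byte ('B') element type.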
cols = fits.ColDefs([col])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.name = "RFI"
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
hduL = fits.open(self.temp('testendian.fits'))
rfiHDU = hduL['RFI']
data = rfiHDU.data
channelsOut = data.field('Channels')[0]
assert (channelsIn == channelsOut).all()
hduL.close()
def test_column_endianness(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77
(Astropy doesn't preserve byte order of non-native order column arrays)
"""
a = [1., 2., 3., 4.]
a1 = np.array(a, dtype='<f8')
a2 = np.array(a, dtype='>f8')
col1 = fits.Column(name='a', format='D', array=a1)
col2 = fits.Column(name='b', format='D', array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
assert (tbhdu.data['a'] == a1).all()
assert (tbhdu.data['b'] == a2).all()
# Double check that the array is converted to the correct byte-order
# for FITS (big-endian).
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
with fits.open(self.temp('testendian.fits')) as hdul:
assert (hdul[1].data['a'] == a2).all()
assert (hdul[1].data['b'] == a2).all()
def test_recarray_to_bintablehdu(self):
bright = np.rec.array(
[(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float32,a10',
names='order,name,mag,Sp')
hdu = fits.BinTableHDU(bright)
assert comparerecords(hdu.data, bright)
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
assert comparerecords(bright, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu(self):
desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],
'formats': ['int', 'S20', 'float32', 'S10']})
a = np.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu_with_unicode(self):
desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],
'formats': ['int', 'U20', 'float32', 'U10']})
a = np.array([(1, u'Serius', -1.45, u'A1V'),
(2, u'Canopys', -0.73, u'F0Ib'),
(3, u'Rigil Kent', -0.1, u'G2V')], dtype=desc)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_new_table_from_recarray(self):
bright = np.rec.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float64,a10',
names='order,name,mag,Sp')
hdu = fits.TableHDU.from_columns(bright, nrows=2)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns.columns[0].array))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns._arrays[0]))
# Ensure I can change the value of one data element and it affects
# all of the others.
hdu.data[0][0] = 213
assert hdu.data[0][0] == 213
assert hdu.data._coldefs._arrays[0][0] == 213
assert hdu.data._coldefs.columns[0].array[0] == 213
assert hdu.columns._arrays[0][0] == 213
assert hdu.columns.columns[0].array[0] == 213
hdu.data._coldefs._arrays[0][0] = 100
assert hdu.data[0][0] == 100
assert hdu.data._coldefs._arrays[0][0] == 100
assert hdu.data._coldefs.columns[0].array[0] == 100
assert hdu.columns._arrays[0][0] == 100
assert hdu.columns.columns[0].array[0] == 100
hdu.data._coldefs.columns[0].array[0] = 500
assert hdu.data[0][0] == 500
assert hdu.data._coldefs._arrays[0][0] == 500
assert hdu.data._coldefs.columns[0].array[0] == 500
assert hdu.columns._arrays[0][0] == 500
assert hdu.columns.columns[0].array[0] == 500
hdu.columns._arrays[0][0] = 600
assert hdu.data[0][0] == 600
assert hdu.data._coldefs._arrays[0][0] == 600
assert hdu.data._coldefs.columns[0].array[0] == 600
assert hdu.columns._arrays[0][0] == 600
assert hdu.columns.columns[0].array[0] == 600
hdu.columns.columns[0].array[0] = 800
assert hdu.data[0][0] == 800
assert hdu.data._coldefs._arrays[0][0] == 800
assert hdu.data._coldefs.columns[0].array[0] == 800
assert hdu.columns._arrays[0][0] == 800
assert hdu.columns.columns[0].array[0] == 800
assert (hdu.data.field(0) ==
np.array([800, 2], dtype=np.int16)).all()
assert hdu.data[0][1] == 'Serius'
assert hdu.data[1][1] == 'Canopys'
assert (hdu.data.field(2) ==
np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdu.data[0][3] == 'A1V'
assert hdu.data[1][3] == 'F0Ib'
with ignore_warnings():
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert (hdul[1].data.field(0) ==
np.array([800, 2], dtype=np.int16)).all()
assert hdul[1].data[0][1] == 'Serius'
assert hdul[1].data[1][1] == 'Canopys'
assert (hdul[1].data.field(2) ==
np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdul[1].data[0][3] == 'A1V'
assert hdul[1].data[1][3] == 'F0Ib'
del hdul
hdu = fits.BinTableHDU.from_columns(bright, nrows=2)
tmp = np.rec.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib')],
formats='int16,a20,float64,a10',
names='order,name,mag,Sp')
assert comparerecords(hdu.data, tmp)
with ignore_warnings():
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
def test_new_fitsrec(self):
"""
Tests creating a new FITS_rec object from a multi-field ndarray.
"""
h = fits.open(self.data('tb.fits'))
data = h[1].data
new_data = np.array([(3, 'qwe', 4.5, False)], dtype=data.dtype)
appended = np.append(data, new_data).view(fits.FITS_rec)
assert repr(appended).startswith('FITS_rec(')
# This test used to check the entire string representation of FITS_rec,
# but that has problems between different numpy versions. Instead just
# check that the FITS_rec was created, and we'll let subsequent tests
# worry about checking values and such
def test_appending_a_column(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
counts = np.array([412, 434, 408, 417])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table2.fits'))
# Append the rows of table 2 after the rows of table 1
# The column definitions are assumed to be the same
# Open the two files we want to append
t1 = fits.open(self.temp('table1.fits'))
t2 = fits.open(self.temp('table2.fits'))
# Get the number of rows in the table from the first file
nrows1 = t1[1].data.shape[0]
# Get the total number of rows in the resulting appended table
nrows = t1[1].data.shape[0] + t2[1].data.shape[0]
assert (t1[1].columns._arrays[1] is t1[1].columns.columns[1].array)
# Create a new table that consists of the data from the first table
# but has enough space in the ndarray to hold the data from both tables
hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows)
# For each column in the tables append the data from table 2 after the
# data from table 1.
for i in range(len(t1[1].columns)):
hdu.data.field(i)[nrows1:] = t2[1].data.field(i)
hdu.writeto(self.temp('newtable.fits'))
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 19, '8R x 5C', '[10A, J, 10A, 5E, L]',
'')]
assert fits.info(self.temp('newtable.fits'), output=False) == info
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True),
('NGC5', 412, '', z, False),
('NGC6', 434, '', z, True),
('NGC7', 408, '', z, False),
('NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
# Verify that all of the references to the data point to the same
# ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
# Same verification from the file
hdul = fits.open(self.temp('newtable.fits'))
hdu = hdul[1]
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_adding_a_column(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
coldefs1 = coldefs + c5
tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
assert tbhdu1.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(tbhdu1.data, array)
def test_merge_tables(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
counts = np.array([412, 434, 408, 417])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target1', format='10A', array=names)
c2 = fits.Column(name='counts1', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes1', format='A10')
c4 = fits.Column(name='spectrum1', format='5E')
c5 = fits.Column(name='flag1', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table2.fits'))
# Merge the columns of table 2 after the columns of table 1
# The column names are assumed to be different
# Open the two files we want to append
t1 = fits.open(self.temp('table1.fits'))
t2 = fits.open(self.temp('table2.fits'))
hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns)
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),
('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),
('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),
('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
hdu.writeto(self.temp('newtable.fits'))
# Verify that all of the references to the data point to the same
# ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 30, '4R x 10C',
'[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]', '')]
assert fits.info(self.temp('newtable.fits'), output=False) == info
hdul = fits.open(self.temp('newtable.fits'))
hdu = hdul[1]
assert (hdu.columns.names ==
['target', 'counts', 'notes', 'spectrum', 'flag', 'target1',
'counts1', 'notes1', 'spectrum1', 'flag1'])
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),
('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),
('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),
('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
# Same verification from the file
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_modify_column_attributes(self):
"""Regression test for https://github.com/astropy/astropy/issues/996
This just tests one particular use case, but it should apply pretty
well to other similar cases.
"""
NULLS = {'a': 2, 'b': 'b', 'c': 2.3}
data = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'S1'), ('c', float)])
b = fits.BinTableHDU(data=data)
for col in b.columns:
col.null = NULLS[col.name]
b.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
header = hdul[1].header
assert header['TNULL1'] == 2
assert header['TNULL2'] == 'b'
assert header['TNULL3'] == 2.3
@pytest.mark.xfail(not NUMPY_LT_1_14_1 and NUMPY_LT_1_14_2,
reason="See https://github.com/astropy/astropy/issues/7214")
def test_mask_array(self):
t = fits.open(self.data('table.fits'))
tbdata = t[1].data
mask = tbdata.field('V_mag') > 12
newtbdata = tbdata[mask]
hdu = fits.BinTableHDU(newtbdata)
hdu.writeto(self.temp('newtable.fits'))
hdul = fits.open(self.temp('newtable.fits'))
# numpy >= 1.12 changes how structured arrays are printed, so we
# match to a regex rather than a specific string.
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]"
assert re.match(expect, str(hdu.data))
assert re.match(expect, str(hdul[1].data))
t.close()
hdul.close()
def test_slice_a_row(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
row = t1[1].data[2]
assert row['counts'] == 308
a, b, c = row[1:4]
assert a == counts[2]
assert b == ''
assert (c == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
row['counts'] = 310
assert row['counts'] == 310
row[1] = 315
assert row['counts'] == 315
assert row[1:4]['counts'] == 315
pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)
row[1:4]['counts'] = 300
assert row[1:4]['counts'] == 300
assert row['counts'] == 300
row[1:4][0] = 400
assert row[1:4]['counts'] == 400
row[1:4]['counts'] = 300
assert row[1:4]['counts'] == 300
# Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59
row[1:4][::-1][-1] = 500
assert row[1:4]['counts'] == 500
row[1:4:2][0] = 300
assert row[1:4]['counts'] == 300
pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)
assert row[1:4].field(0) == 300
assert row[1:4].field('counts') == 300
pytest.raises(KeyError, row[1:4].field, 'flag')
row[1:4].setfield('counts', 500)
assert row[1:4].field(0) == 500
pytest.raises(KeyError, row[1:4].setfield, 'flag', False)
assert t1[1].data._coldefs._arrays[1][2] == 500
assert t1[1].data._coldefs.columns[1].array[2] == 500
assert t1[1].columns._arrays[1][2] == 500
assert t1[1].columns.columns[1].array[2] == 500
assert t1[1].data[2][1] == 500
t1.close()
def test_fits_record_len(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
assert len(t1[1].data[0]) == 5
assert len(t1[1].data[0][0:4]) == 4
assert len(t1[1].data[0][0:5]) == 5
assert len(t1[1].data[0][0:6]) == 5
assert len(t1[1].data[0][0:7]) == 5
assert len(t1[1].data[0][1:4]) == 3
assert len(t1[1].data[0][1:5]) == 4
assert len(t1[1].data[0][1:6]) == 4
assert len(t1[1].data[0][1:7]) == 4
t1.close()
def test_add_data_by_rows(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
c1 = fits.Column(name='target', format='10A')
c2 = fits.Column(name='counts', format='J', unit='DN')
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L')
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5)
# Test assigning data to a table's row using a FITS_record
tbhdu.data[0] = tbhdu1.data[0]
tbhdu.data[4] = tbhdu1.data[3]
# Test assigning data to a table's row using a tuple
tbhdu.data[2] = ('NGC1', 312, 'A Note',
np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32),
True)
# Test assigning data to a table's row using a list
tbhdu.data[3] = ['JIM1', '33', 'A Note',
np.array([1., 2., 3., 4., 5.], dtype=np.float32),
True]
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.data._coldefs._arrays[0]))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns.columns[0].array))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns._arrays[0]))
assert tbhdu.data[0][1] == 312
assert tbhdu.data._coldefs._arrays[1][0] == 312
assert tbhdu.data._coldefs.columns[1].array[0] == 312
assert tbhdu.columns._arrays[1][0] == 312
assert tbhdu.columns.columns[1].array[0] == 312
assert tbhdu.columns.columns[0].array[0] == 'NGC1'
assert tbhdu.columns.columns[2].array[0] == ''
assert (tbhdu.columns.columns[3].array[0] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu.columns.columns[4].array[0] == True # nopep8
assert tbhdu.data[3][1] == 33
assert tbhdu.data._coldefs._arrays[1][3] == 33
assert tbhdu.data._coldefs.columns[1].array[3] == 33
assert tbhdu.columns._arrays[1][3] == 33
assert tbhdu.columns.columns[1].array[3] == 33
assert tbhdu.columns.columns[0].array[3] == 'JIM1'
assert tbhdu.columns.columns[2].array[3] == 'A Note'
assert (tbhdu.columns.columns[3].array[3] ==
np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()
assert tbhdu.columns.columns[4].array[3] == True # nopep8
def test_assign_multiple_rows_to_table(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
counts = np.array([112, 134, 108, 117])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][3] = np.array([1., 2., 3., 4., 5.], dtype=np.float32)
tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9)
# Assign the 4 rows from the second table to rows 5 thru 8 of the
# new table. Note that the last row of the new table will still be
# initialized to the default values.
tbhdu2.data[4:] = tbhdu.data
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.data._coldefs._arrays[0]))
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.columns.columns[0].array))
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.columns._arrays[0]))
assert tbhdu2.data[0][1] == 312
assert tbhdu2.data._coldefs._arrays[1][0] == 312
assert tbhdu2.data._coldefs.columns[1].array[0] == 312
assert tbhdu2.columns._arrays[1][0] == 312
assert tbhdu2.columns.columns[1].array[0] == 312
assert tbhdu2.columns.columns[0].array[0] == 'NGC1'
assert tbhdu2.columns.columns[2].array[0] == ''
assert (tbhdu2.columns.columns[3].array[0] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[0] == True # nopep8
assert tbhdu2.data[4][1] == 112
assert tbhdu2.data._coldefs._arrays[1][4] == 112
assert tbhdu2.data._coldefs.columns[1].array[4] == 112
assert tbhdu2.columns._arrays[1][4] == 112
assert tbhdu2.columns.columns[1].array[4] == 112
assert tbhdu2.columns.columns[0].array[4] == 'NGC5'
assert tbhdu2.columns.columns[2].array[4] == ''
assert (tbhdu2.columns.columns[3].array[4] ==
np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[4] == False # nopep8
assert tbhdu2.columns.columns[1].array[8] == 0
assert tbhdu2.columns.columns[0].array[8] == ''
assert tbhdu2.columns.columns[2].array[8] == ''
assert (tbhdu2.columns.columns[3].array[8] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[8] == False # nopep8
def test_verify_data_references(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
# Verify that original ColDefs object has independent Column
# objects.
assert id(coldefs.columns[0]) != id(c1)
# Verify that original ColDefs object has independent ndarray
# objects.
assert id(coldefs.columns[0].array) != id(names)
# Verify that original ColDefs object references the same data
# object as the original Column object.
assert id(coldefs.columns[0].array) == id(c1.array)
assert id(coldefs.columns[0].array) == id(coldefs._arrays[0])
# Verify new HDU has an independent ColDefs object.
assert id(coldefs) != id(tbhdu.columns)
# Verify new HDU has independent Column objects.
assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0])
# Verify new HDU has independent ndarray objects.
assert (id(coldefs.columns[0].array) !=
id(tbhdu.columns.columns[0].array))
# Verify that both ColDefs objects in the HDU reference the same
# Coldefs object.
assert id(tbhdu.columns) == id(tbhdu.data._coldefs)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.data._coldefs._arrays[0]))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns.columns[0].array))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns._arrays[0]))
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_ndarray(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray))
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.data._coldefs._arrays[0]))
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.columns.columns[0].array))
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.columns._arrays[0]))
# Ensure I can change the value of one data element and it affects
# all of the others.
tbhdu1.data[0][1] = 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
tbhdu1.data._coldefs.columns[1].array[0] = 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
tbhdu1.columns._arrays[1][0] = 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
tbhdu1.columns.columns[1].array[0] = 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
tbhdu1.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_fits_rec(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][1] = 213
assert tbhdu.data[0][1] == 213
assert tbhdu.data._coldefs._arrays[1][0] == 213
assert tbhdu.data._coldefs.columns[1].array[0] == 213
assert tbhdu.columns._arrays[1][0] == 213
assert tbhdu.columns.columns[1].array[0] == 213
tbhdu.data._coldefs._arrays[1][0] = 100
assert tbhdu.data[0][1] == 100
assert tbhdu.data._coldefs._arrays[1][0] == 100
assert tbhdu.data._coldefs.columns[1].array[0] == 100
assert tbhdu.columns._arrays[1][0] == 100
assert tbhdu.columns.columns[1].array[0] == 100
tbhdu.data._coldefs.columns[1].array[0] = 500
assert tbhdu.data[0][1] == 500
assert tbhdu.data._coldefs._arrays[1][0] == 500
assert tbhdu.data._coldefs.columns[1].array[0] == 500
assert tbhdu.columns._arrays[1][0] == 500
assert tbhdu.columns.columns[1].array[0] == 500
tbhdu.columns._arrays[1][0] = 600
assert tbhdu.data[0][1] == 600
assert tbhdu.data._coldefs._arrays[1][0] == 600
assert tbhdu.data._coldefs.columns[1].array[0] == 600
assert tbhdu.columns._arrays[1][0] == 600
assert tbhdu.columns.columns[1].array[0] == 600
tbhdu.columns.columns[1].array[0] = 800
assert tbhdu.data[0][1] == 800
assert tbhdu.data._coldefs._arrays[1][0] == 800
assert tbhdu.data._coldefs.columns[1].array[0] == 800
assert tbhdu.columns._arrays[1][0] == 800
assert tbhdu.columns.columns[1].array[0] == 800
tbhdu.columns.columns[1].array[0] = 312
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 1
fr = t1[1].data
assert t1[1].data[0][1] == 1
assert t1[1].data._coldefs._arrays[1][0] == 1
assert t1[1].data._coldefs.columns[1].array[0] == 1
assert t1[1].columns._arrays[1][0] == 1
assert t1[1].columns.columns[1].array[0] == 1
assert fr[0][1] == 1
assert fr._coldefs._arrays[1][0] == 1
assert fr._coldefs.columns[1].array[0] == 1
fr._coldefs.columns[1].array[0] = 312
tbhdu1 = fits.BinTableHDU.from_columns(fr)
i = 0
for row in tbhdu1.data:
for j in range(len(row)):
if isinstance(row[j], np.ndarray):
assert (row[j] == tbhdu.data[i][j]).all()
else:
assert row[j] == tbhdu.data[i][j]
i = i + 1
tbhdu1.data[0][1] = 213
assert t1[1].data[0][1] == 312
assert t1[1].data._coldefs._arrays[1][0] == 312
assert t1[1].data._coldefs.columns[1].array[0] == 312
assert t1[1].columns._arrays[1][0] == 312
assert t1[1].columns.columns[1].array[0] == 312
assert fr[0][1] == 312
assert fr._coldefs._arrays[1][0] == 312
assert fr._coldefs.columns[1].array[0] == 312
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
t1[1].data[0][1] = 10
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 666
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 666
assert tbhdu1.data._coldefs._arrays[1][0] == 666
assert tbhdu1.data._coldefs.columns[1].array[0] == 666
assert tbhdu1.columns._arrays[1][0] == 666
assert tbhdu1.columns.columns[1].array[0] == 666
t1.close()
def test_bin_table_hdu_constructor(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
hdu = fits.BinTableHDU(tbhdu1.data)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns.columns[0].array))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns._arrays[0]))
# Verify that the references in the original HDU are the same as the
# references in the new HDU.
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
# Verify that a change in the new HDU is reflected in both the new
# and original HDU.
hdu.data[0][1] = 213
assert hdu.data[0][1] == 213
assert hdu.data._coldefs._arrays[1][0] == 213
assert hdu.data._coldefs.columns[1].array[0] == 213
assert hdu.columns._arrays[1][0] == 213
assert hdu.columns.columns[1].array[0] == 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
hdu.data._coldefs._arrays[1][0] = 100
assert hdu.data[0][1] == 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
hdu.data._coldefs.columns[1].array[0] = 500
assert hdu.data[0][1] == 500
assert hdu.data._coldefs._arrays[1][0] == 500
assert hdu.data._coldefs.columns[1].array[0] == 500
assert hdu.columns._arrays[1][0] == 500
assert hdu.columns.columns[1].array[0] == 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
hdu.columns._arrays[1][0] = 600
assert hdu.data[0][1] == 600
assert hdu.data._coldefs._arrays[1][0] == 600
assert hdu.data._coldefs.columns[1].array[0] == 600
assert hdu.columns._arrays[1][0] == 600
assert hdu.columns.columns[1].array[0] == 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
hdu.columns.columns[1].array[0] = 800
assert hdu.data[0][1] == 800
assert hdu.data._coldefs._arrays[1][0] == 800
assert hdu.data._coldefs.columns[1].array[0] == 800
assert hdu.columns._arrays[1][0] == 800
assert hdu.columns.columns[1].array[0] == 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
def test_constructor_name_arg(self):
"""testConstructorNameArg
Passing name='...' to the BinTableHDU and TableHDU constructors
should set the .name attribute and 'EXTNAME' header keyword, and
override any name in an existing 'EXTNAME' value.
"""
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = hducls(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = hducls(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_ver_arg(self):
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.ver == 1
assert 'EXTVER' not in hdu.header
hdu.ver = 2
assert hdu.ver == 2
assert hdu.header['EXTVER'] == 2
# Passing ver to constructor
hdu = hducls(ver=3)
assert hdu.ver == 3
assert hdu.header['EXTVER'] == 3
# And overriding a header with a different extver
hdr = fits.Header()
hdr['EXTVER'] = 4
hdu = hducls(header=hdr, ver=5)
assert hdu.ver == 5
assert hdu.header['EXTVER'] == 5
def test_unicode_colname(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5204
"Handle unicode FITS BinTable column names on Python 2"
"""
col = fits.Column(name=u'spam', format='E', array=[42.])
# This used to raise a TypeError, now it works
fits.BinTableHDU.from_columns([col])
def test_bin_table_with_logical_array(self):
c1 = fits.Column(name='flag', format='2L',
array=[[True, False], [False, True]])
coldefs = fits.ColDefs([c1])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
assert (tbhdu1.data.field('flag')[0] ==
np.array([True, False], dtype=bool)).all()
assert (tbhdu1.data.field('flag')[1] ==
np.array([False, True], dtype=bool)).all()
tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)
assert (tbhdu.data.field('flag')[0] ==
np.array([True, False], dtype=bool)).all()
assert (tbhdu.data.field('flag')[1] ==
np.array([False, True], dtype=bool)).all()
def test_fits_rec_column_access(self):
t = fits.open(self.data('table.fits'))
tbdata = t[1].data
assert (tbdata.V_mag == tbdata.field('V_mag')).all()
assert (tbdata.V_mag == tbdata['V_mag']).all()
t.close()
def test_table_with_zero_width_column(self):
hdul = fits.open(self.data('zerowidth.fits'))
tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM'
assert 'ORBPARM' in tbhdu.columns.names
# The zero-width ORBPARM column should still appear in the data, and
# the data should be readable
assert 'ORBPARM' in tbhdu.data.names
assert 'ORBPARM' in tbhdu.data.dtype.names
# Verify that some of the data columns are still correctly accessible
# by name
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
np.array([499.85566663, -1317.99231554, -735.18866164],
dtype=np.float64))
assert tbhdu.data[0]['NOSTA'] == 1
assert tbhdu.data[0]['MNTSTA'] == 0
assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'
assert comparefloats(
tbhdu.data[-1]['STABXYZ'],
np.array([0.0, 0.0, 0.0], dtype=np.float64))
assert tbhdu.data[-1]['NOSTA'] == 29
assert tbhdu.data[-1]['MNTSTA'] == 0
hdul.writeto(self.temp('newtable.fits'))
hdul.close()
hdul = fits.open(self.temp('newtable.fits'))
tbhdu = hdul[2]
# Verify that the previous tests still hold after writing
assert 'ORBPARM' in tbhdu.columns.names
assert 'ORBPARM' in tbhdu.data.names
assert 'ORBPARM' in tbhdu.data.dtype.names
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
np.array([499.85566663, -1317.99231554, -735.18866164],
dtype=np.float64))
assert tbhdu.data[0]['NOSTA'] == 1
assert tbhdu.data[0]['MNTSTA'] == 0
assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'
assert comparefloats(
tbhdu.data[-1]['STABXYZ'],
np.array([0.0, 0.0, 0.0], dtype=np.float64))
assert tbhdu.data[-1]['NOSTA'] == 29
assert tbhdu.data[-1]['MNTSTA'] == 0
hdul.close()
def test_string_column_padding(self):
a = ['img1', 'img2', 'img3a', 'p']
s = 'img1\x00\x00\x00\x00\x00\x00' \
'img2\x00\x00\x00\x00\x00\x00' \
'img3a\x00\x00\x00\x00\x00' \
'p\x00\x00\x00\x00\x00\x00\x00\x00\x00'
acol = fits.Column(name='MEMNAME', format='A10',
array=chararray.array(a))
ahdu = fits.BinTableHDU.from_columns([acol])
assert ahdu.data.tostring().decode('raw-unicode-escape') == s
ahdu.writeto(self.temp('newtable.fits'))
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].data.tostring().decode('raw-unicode-escape') == s
assert (hdul[1].data['MEMNAME'] == a).all()
del hdul
ahdu = fits.TableHDU.from_columns([acol])
with ignore_warnings():
ahdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert (hdul[1].data.tostring().decode('raw-unicode-escape') ==
s.replace('\x00', ' '))
assert (hdul[1].data['MEMNAME'] == a).all()
ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy())
del hdul
# Now serialize once more as a binary table; padding bytes should
# revert to zeroes
ahdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].data.tostring().decode('raw-unicode-escape') == s
assert (hdul[1].data['MEMNAME'] == a).all()
def test_multi_dimensional_columns(self):
"""
Tests the multidimensional column implementation with both numeric
arrays and string arrays.
"""
data = np.rec.array(
[([0, 1, 2, 3, 4, 5], 'row1' * 2),
([6, 7, 8, 9, 0, 1], 'row2' * 2),
([2, 3, 4, 5, 6, 7], 'row3' * 2)], formats='6i4,a8')
thdu = fits.BinTableHDU.from_columns(data)
# Modify the TDIM fields to my own specification
thdu.header['TDIM1'] = '(2,3)'
thdu.header['TDIM2'] = '(4,2)'
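# TDIM uses FITS (column-major) axis order, so TDIM1 = '(2,3)' reshapes each
# 6-element row of field 0 to a numpy (3, 2) array, and TDIM2 = '(4,2)'
# splits each 8-char string into a pair of 4-char strings, matching the
# shape assertions below.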
thdu.writeto(self.temp('newtable.fits'))
with fits.open(self.temp('newtable.fits')) as hdul:
thdu = hdul[1]
c1 = thdu.data.field(0)
c2 = thdu.data.field(1)
assert c1.shape == (3, 3, 2)
assert c2.shape == (3, 2)
assert (c1 == np.array([[[0, 1], [2, 3], [4, 5]],
[[6, 7], [8, 9], [0, 1]],
[[2, 3], [4, 5], [6, 7]]])).all()
assert (c2 == np.array([['row1', 'row1'],
['row2', 'row2'],
['row3', 'row3']])).all()
del c1
del c2
del thdu
del hdul
# Test setting the TDIMn header based on the column data
data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', 4)])
data['x'] = 1, 2, 3
data['s'] = 'ok'
with ignore_warnings():
fits.writeto(self.temp('newtable.fits'), data, overwrite=True)
t = fits.getdata(self.temp('newtable.fits'))
assert t.field(1).dtype.str[-1] == '5'
assert t.field(1).shape == (3, 4)
# Like the previous test, but with an extra dimension (a bit more
# complicated)
data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', (4, 3))])
data['x'] = 1, 2, 3
data['s'] = 'ok'
del t
with ignore_warnings():
fits.writeto(self.temp('newtable.fits'), data, overwrite=True)
t = fits.getdata(self.temp('newtable.fits'))
assert t.field(1).dtype.str[-1] == '5'
assert t.field(1).shape == (3, 4, 3)
def test_bin_table_init_from_string_array_column(self):
"""
Tests two ways of creating a new `BinTableHDU` from a column of
string arrays.
This tests for a couple different regressions, and ensures that
both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
equivalently.
Some of this is redundant with the following test, but checks some
subtly different cases.
"""
data = [[b'abcd', b'efgh'],
[b'ijkl', b'mnop'],
[b'qrst', b'uvwx']]
arr = np.array([(data,), (data,), (data,), (data,), (data,)],
dtype=[('S', '(3, 2)S4')])
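# For string fields the implied TDIM lists the string length first, so the
# per-row (3, 2) array of 4-char strings should round-trip with
# TDIM1 == '(4,2,3)' (FITS axis order is the reverse of numpy's).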
with catch_warnings() as w:
tbhdu1 = fits.BinTableHDU(data=arr)
assert len(w) == 0
def test_dims_and_roundtrip(tbhdu):
assert tbhdu.data['S'].shape == (5, 3, 2)
assert tbhdu.data['S'].dtype.str.endswith('U4')
tbhdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header['TDIM1'] == '(4,2,3)'
assert tbhdu2.data['S'].shape == (5, 3, 2)
assert tbhdu.data['S'].dtype.str.endswith('U4')
assert np.all(tbhdu2.data['S'] == tbhdu.data['S'])
test_dims_and_roundtrip(tbhdu1)
tbhdu2 = fits.BinTableHDU.from_columns(arr)
test_dims_and_roundtrip(tbhdu2)
def test_columns_with_truncating_tdim(self):
"""
According to the FITS standard (section 7.3.2):
If the number of elements in the array implied by the TDIMn is less
            than the allocated size of the array in the FITS file, then the
unused trailing elements should be interpreted as containing
undefined fill values.
*deep sigh* What this means is if a column has a repeat count larger
than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)',
but TFORM1 = 6I), then instead of this being an outright error we are
to take the first 4 elements as implied by the TDIM and ignore the
additional two trailing elements.
"""
# It's hard to even successfully create a table like this. I think
# it *should* be difficult, but once created it should at least be
# possible to read.
arr1 = [[b'ab', b'cd'], [b'ef', b'gh'], [b'ij', b'kl']]
arr2 = [1, 2, 3, 4, 5]
arr = np.array([(arr1, arr2), (arr1, arr2)],
dtype=[('a', '(3, 2)S2'), ('b', '5i8')])
tbhdu = fits.BinTableHDU(data=arr)
tbhdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
raw_bytes = f.read()
# Artificially truncate TDIM in the header; this seems to be the
# easiest way to do this while getting around Astropy's insistence on the
# data and header matching perfectly; again, we have no interest in
# making it possible to write files in this format, only read them
with open(self.temp('test.fits'), 'wb') as f:
f.write(raw_bytes.replace(b'(2,2,3)', b'(2,2,2)'))
with fits.open(self.temp('test.fits')) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header['TDIM1'] == '(2,2,2)'
assert tbhdu2.header['TFORM1'] == '12A'
for row in tbhdu2.data:
assert np.all(row['a'] == [['ab', 'cd'], ['ef', 'gh']])
assert np.all(row['b'] == [1, 2, 3, 4, 5])
def test_string_array_round_trip(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201"""
data = [['abc', 'def', 'ghi'],
['jkl', 'mno', 'pqr'],
['stu', 'vwx', 'yz ']]
recarr = np.rec.array([(data,), (data,)], formats=['(3,3)S3'])
t = fits.BinTableHDU(data=recarr)
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert 'TDIM1' in h[1].header
assert h[1].header['TDIM1'] == '(3,3,3)'
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (h[1].data.field(0)[0] ==
np.char.decode(recarr.field(0)[0], 'ascii')).all()
with fits.open(self.temp('test.fits')) as h:
# Access the data; I think this is necessary to exhibit the bug
# reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201
h[1].data[:]
h.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits')) as h:
assert 'TDIM1' in h[1].header
assert h[1].header['TDIM1'] == '(3,3,3)'
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (h[1].data.field(0)[0] ==
np.char.decode(recarr.field(0)[0], 'ascii')).all()
def test_new_table_with_nd_column(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/3
"""
arra = np.array(['a', 'b'], dtype='|S1')
arrb = np.array([['a', 'bc'], ['cd', 'e']], dtype='|S2')
arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
cols = [
fits.Column(name='str', format='1A', array=arra),
fits.Column(name='strarray', format='4A', dim='(2,2)',
array=arrb),
fits.Column(name='intarray', format='4I', dim='(2, 2)',
array=arrc)
]
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
# Need to force string arrays to byte arrays in order to compare
# correctly on Python 3
assert (h[1].data['str'].encode('ascii') == arra).all()
assert (h[1].data['strarray'].encode('ascii') == arrb).all()
assert (h[1].data['intarray'] == arrc).all()
def test_mismatched_tform_and_tdim(self):
"""Normally the product of the dimensions listed in a TDIMn keyword
must be less than or equal to the repeat count in the TFORMn keyword.
This tests that this works if less than (treating the trailing bytes
as unspecified fill values per the FITS standard) and fails if the
dimensions specified by TDIMn are greater than the repeat count.
"""
arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
cols = [fits.Column(name='a', format='20I', dim='(2,2)',
array=arra),
fits.Column(name='b', format='4I', dim='(2,2)',
array=arrb)]
# The first column has the mismatched repeat count
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['TFORM1'] == '20I'
assert h[1].header['TFORM2'] == '4I'
assert h[1].header['TDIM1'] == h[1].header['TDIM2'] == '(2,2)'
assert (h[1].data['a'] == arra).all()
assert (h[1].data['b'] == arrb).all()
assert h[1].data.itemsize == 48 # 16-bits times 24
# If dims is more than the repeat count in the format specifier raise
# an error
pytest.raises(VerifyError, fits.Column, name='a', format='2I',
dim='(2,2)', array=arra)
def test_tdim_of_size_one(self):
"""Regression test for https://github.com/astropy/astropy/pull/3580"""
hdulist = fits.open(self.data('tdim.fits'))
assert hdulist[1].data['V_mag'].shape == (3, 1, 1)
def test_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52"""
f = fits.open(self.data('table.fits'))
data = f[1].data
targets = data.field('target')
s = data[:]
assert (s.field('target') == targets).all()
for n in range(len(targets) + 2):
s = data[:n]
assert (s.field('target') == targets[:n]).all()
s = data[n:]
assert (s.field('target') == targets[n:]).all()
s = data[::2]
assert (s.field('target') == targets[::2]).all()
s = data[::-1]
assert (s.field('target') == targets[::-1]).all()
def test_array_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55"""
f = fits.open(self.data('table.fits'))
data = f[1].data
s1 = data[data['target'] == 'NGC1001']
s2 = data[np.where(data['target'] == 'NGC1001')]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_array_broadcasting(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/pull/48
"""
with fits.open(self.data('table.fits')) as hdu:
data = hdu[1].data
data['V_mag'] = 0
assert np.all(data['V_mag'] == 0)
data['V_mag'] = 1
assert np.all(data['V_mag'] == 1)
for container in (list, tuple, np.array):
data['V_mag'] = container([1, 2, 3])
assert np.array_equal(data['V_mag'], np.array([1, 2, 3]))
def test_array_slicing_readonly(self):
"""
Like test_array_slicing but with the file opened in 'readonly' mode.
Regression test for a crash when slicing readonly memmap'd tables.
"""
f = fits.open(self.data('table.fits'), mode='readonly')
data = f[1].data
s1 = data[data['target'] == 'NGC1001']
s2 = data[np.where(data['target'] == 'NGC1001')]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_dump_load_round_trip(self):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
hdul = fits.open(self.data('table.fits'))
tbhdu = hdul[1]
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
# Double check that the headers are equivalent
assert str(tbhdu.header) == str(new_tbhdu.header)
    def test_dump_load_array_columns(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/22
Ensures that a table containing a multi-value array column can be
dumped and loaded successfully.
"""
data = np.rec.array([('a', [1, 2, 3, 4], 0.1),
('b', [5, 6, 7, 8], 0.2)],
formats='a1,4i4,f8')
tbhdu = fits.BinTableHDU.from_columns(data)
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
assert str(tbhdu.header) == str(new_tbhdu.header)
def test_load_guess_format(self):
"""
        Tests loading a table dump with no supplied coldefs or header, so that
        the table format has to be guessed at.  There is of course no exact
        science to this; the table that's produced simply uses sensible guesses
        for each column's format.  Ideally this should never have to be used.
"""
# Create a table containing a variety of data types.
a0 = np.array([False, True, False], dtype=bool)
c0 = fits.Column(name='c0', format='L', array=a0)
        # Format X is currently not supported by the format-guessing code
# a1 = np.array([[0], [1], [0]], dtype=np.uint8)
# c1 = fits.Column(name='c1', format='X', array=a1)
a2 = np.array([1, 128, 255], dtype=np.uint8)
c2 = fits.Column(name='c2', format='B', array=a2)
a3 = np.array([-30000, 1, 256], dtype=np.int16)
c3 = fits.Column(name='c3', format='I', array=a3)
a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32)
c4 = fits.Column(name='c4', format='J', array=a4)
a5 = np.array(['a', 'abc', 'ab'])
c5 = fits.Column(name='c5', format='A3', array=a5)
a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64)
c6 = fits.Column(name='c6', format='D', array=a6)
a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j],
dtype=np.complex128)
c7 = fits.Column(name='c7', format='M', array=a7)
a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
c8 = fits.Column(name='c8', format='PJ()', array=a8)
tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8])
datafile = self.temp('data.txt')
tbhdu.dump(datafile)
new_tbhdu = fits.BinTableHDU.load(datafile)
# In this particular case the record data at least should be equivalent
assert comparerecords(tbhdu.data, new_tbhdu.data)
def test_attribute_field_shadowing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86
Numpy recarray objects have a poorly-considered feature of allowing
        field access by attribute lookup.  However, if a field name coincides
        with an existing attribute/method of the array, the existing name takes
        precedence (making the attribute-based field lookup completely unreliable
in general cases).
This ensures that any FITS_rec attributes still work correctly even
when there is a field with the same name as that attribute.
"""
c1 = fits.Column(name='names', format='I', array=[1])
c2 = fits.Column(name='formats', format='I', array=[2])
c3 = fits.Column(name='other', format='I', array=[3])
t = fits.BinTableHDU.from_columns([c1, c2, c3])
assert t.data.names == ['names', 'formats', 'other']
assert t.data.formats == ['I'] * 3
assert (t.data['names'] == [1]).all()
assert (t.data['formats'] == [2]).all()
assert (t.data.other == [3]).all()
def test_table_from_bool_fields(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113
Tests creating a table from a recarray containing numpy.bool columns.
"""
array = np.rec.array([(True, False), (False, True)], formats='|b1,|b1')
thdu = fits.BinTableHDU.from_columns(array)
assert thdu.columns.formats == ['L', 'L']
assert comparerecords(thdu.data, array)
# Test round trip
thdu.writeto(self.temp('table.fits'))
data = fits.getdata(self.temp('table.fits'), ext=1)
assert thdu.columns.formats == ['L', 'L']
assert comparerecords(data, array)
def test_table_from_bool_fields2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215
Tests the case where a multi-field ndarray (not a recarray) containing
a bool field is used to initialize a `BinTableHDU`.
"""
arr = np.array([(False,), (True,), (False,)], dtype=[('a', '?')])
hdu = fits.BinTableHDU(data=arr)
assert (hdu.data['a'] == arr['a']).all()
def test_bool_column_update(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139"""
c1 = fits.Column('F1', 'L', array=[True, False])
c2 = fits.Column('F2', 'L', array=[False, True])
thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))
thdu.writeto(self.temp('table.fits'))
with fits.open(self.temp('table.fits'), mode='update') as hdul:
hdul[1].data['F1'][1] = True
hdul[1].data['F2'][0] = True
with fits.open(self.temp('table.fits')) as hdul:
assert (hdul[1].data['F1'] == [True, True]).all()
assert (hdul[1].data['F2'] == [True, True]).all()
def test_missing_tnull(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197"""
c = fits.Column('F1', 'A3', null='---',
array=np.array(['1.0', '2.0', '---', '3.0']),
ascii=True)
table = fits.TableHDU.from_columns([c])
table.writeto(self.temp('test.fits'))
# Now let's delete the TNULL1 keyword, making this essentially
# unreadable
with fits.open(self.temp('test.fits'), mode='update') as h:
h[1].header['TFORM1'] = 'E3'
del h[1].header['TNULL1']
with fits.open(self.temp('test.fits')) as h:
pytest.raises(ValueError, lambda: h[1].data['F1'])
try:
with fits.open(self.temp('test.fits')) as h:
h[1].data['F1']
except ValueError as e:
assert str(e).endswith(
"the header may be missing the necessary TNULL1 "
"keyword or the table contains invalid data")
def test_blank_field_zero(self):
"""Regression test for https://github.com/astropy/astropy/issues/5134
Blank values in numerical columns of ASCII tables should be replaced
with zeros, so they can be loaded into numpy arrays.
When a TNULL value is set and there are blank fields not equal to that
value, they should be replaced with zeros.
"""
# Test an integer column with blank string as null
nullval1 = u' '
c1 = fits.Column('F1', format='I8', null=nullval1,
array=np.array([0, 1, 2, 3, 4]),
ascii=True)
table = fits.TableHDU.from_columns([c1])
table.writeto(self.temp('ascii_null.fits'))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp('ascii_null.fits'), mode='r+') as h:
            nulled = h.read().replace(u'2       ', u'        ')  # keep width
h.seek(0)
h.write(nulled)
with fits.open(self.temp('ascii_null.fits'), memmap=True) as f:
assert f[1].data[2][0] == 0
# Test a float column with a null value set and blank fields.
nullval2 = 'NaN'
c2 = fits.Column('F1', format='F12.8', null=nullval2,
array=np.array([1.0, 2.0, 3.0, 4.0]),
ascii=True)
table = fits.TableHDU.from_columns([c2])
table.writeto(self.temp('ascii_null2.fits'))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp('ascii_null2.fits'), mode='r+') as h:
            nulled = h.read().replace(u'3.00000000', u'          ')  # keep width
h.seek(0)
h.write(nulled)
with fits.open(self.temp('ascii_null2.fits'), memmap=True) as f:
# (Currently it should evaluate to 0.0, but if a TODO in fitsrec is
# completed, then it should evaluate to NaN.)
assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column('mag', format='E', array=arr)
assert (arr == col.array).all()
def test_table_none(self):
"""Regression test
for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data('tb.fits')) as h:
h[1].data
h[1].data = None
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['NAXIS'] == 2
assert h[1].header['NAXIS1'] == 12
assert h[1].header['NAXIS2'] == 0
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
    def test_unnecessary_table_load(self):
        """Test unnecessary parsing and processing of FITS tables when writing
        directly from one FITS file to a new file without first reading the
        data for user manipulation.
        In other words, it should be possible to do a direct copy of the raw
        data without unnecessary processing of the data.
        """
with fits.open(self.data('table.fits')) as h:
h[1].writeto(self.temp('test.fits'))
# Since this was a direct copy the h[1].data attribute should not have
# even been accessed (since this means the data was read and parsed)
assert 'data' not in h[1].__dict__
with fits.open(self.data('table.fits')) as h1:
with fits.open(self.temp('test.fits')) as h2:
assert str(h1[1].header) == str(h2[1].header)
assert comparerecords(h1[1].data, h2[1].data)
def test_table_from_columns_of_other_table(self):
"""Tests a rare corner case where the columns of an existing table
are used to create a new table with the new_table function. In this
specific case, however, the existing table's data has not been read
yet, so new_table has to get at it through the Delayed proxy.
Note: Although this previously tested new_table it now uses
BinTableHDU.from_columns directly, around which new_table is a mere
wrapper.
"""
hdul = fits.open(self.data('table.fits'))
# Make sure the column array is in fact delayed...
assert isinstance(hdul[1].columns._arrays[0], Delayed)
# Create a new table...
t = fits.BinTableHDU.from_columns(hdul[1].columns)
# The original columns should no longer be delayed...
assert not isinstance(hdul[1].columns._arrays[0], Delayed)
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul2:
assert comparerecords(hdul[1].data, hdul2[1].data)
def test_bintable_to_asciitable(self):
"""Tests initializing a TableHDU with the data from a BinTableHDU."""
with fits.open(self.data('tb.fits')) as hdul:
tbdata = hdul[1].data
tbhdu = fits.TableHDU(data=tbdata)
with ignore_warnings():
tbhdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul2:
tbdata2 = hdul2[1].data
assert np.all(tbdata['c1'] == tbdata2['c1'])
assert np.all(tbdata['c2'] == tbdata2['c2'])
# c3 gets converted from float32 to float64 when writing
# test.fits, so cast to float32 before testing that the correct
# value is retrieved
assert np.all(tbdata['c3'].astype(np.float32) ==
tbdata2['c3'].astype(np.float32))
# c4 is a boolean column in the original table; we want ASCII
# columns to convert these to columns of 'T'/'F' strings
assert np.all(np.where(tbdata['c4'], 'T', 'F') ==
tbdata2['c4'])
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data('tb.fits')) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data('ascii.fits')) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data('random_groups.fits')) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data('zerowidth.fits')) as zwc:
            # Doesn't pickle zero-width (_phantom) column 'ORBPARM'
with ignore_warnings():
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
assert comparerecords(zwc_pl, zwc[2].data)
def test_zero_length_table(self):
array = np.array([], dtype=[
('a', 'i8'),
('b', 'S64'),
('c', ('i4', (3, 2)))])
hdu = fits.BinTableHDU(array)
assert hdu.header['NAXIS1'] == 96
assert hdu.header['NAXIS2'] == 0
assert hdu.header['TDIM3'] == '(2,3)'
field = hdu.data.field(1)
assert field.shape == (0,)
def test_dim_column_byte_order_mismatch(self):
"""
When creating a table column with non-trivial TDIMn, and
big-endian array data read from an existing FITS file, the data
should not be unnecessarily byteswapped.
Regression test for https://github.com/astropy/astropy/issues/3561
"""
data = fits.getdata(self.data('random_groups.fits'))['DATA']
col = fits.Column(name='TEST', array=data, dim='(3,1,128,1,1)',
format='1152E')
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert np.all(hdul[1].data['TEST'] == data)
def test_fits_rec_from_existing(self):
"""
Tests creating a `FITS_rec` object with `FITS_rec.from_columns`
from an existing `FITS_rec` object read from a FITS file.
This ensures that the per-column arrays are updated properly.
Regression test for https://github.com/spacetelescope/PyFITS/issues/99
"""
# The use case that revealed this problem was trying to create a new
# table from an existing table, but with additional rows so that we can
# append data from a second table (with the same column structure)
data1 = fits.getdata(self.data('tb.fits'))
data2 = fits.getdata(self.data('tb.fits'))
nrows = len(data1) + len(data2)
merged = fits.FITS_rec.from_columns(data1, nrows=nrows)
merged[len(data1):] = data2
mask = merged['c1'] > 1
masked = merged[mask]
# The test table only has two rows, only the second of which is > 1 for
# the 'c1' column
assert comparerecords(data1[1:], masked[:1])
assert comparerecords(data1[1:], masked[1:])
# Double check that the original data1 table hasn't been affected by
# its use in creating the "merged" table
assert comparerecords(data1, fits.getdata(self.data('tb.fits')))
def test_update_string_column_inplace(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4452
Ensure that changes to values in a string column are saved when
a file is opened in ``mode='update'``.
"""
data = np.array([('abc',)], dtype=[('a', 'S3')])
fits.writeto(self.temp('test.fits'), data)
with fits.open(self.temp('test.fits'), mode='update') as hdul:
hdul[1].data['a'][0] = 'XYZ'
assert hdul[1].data['a'][0] == 'XYZ'
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].data['a'][0] == 'XYZ'
# Test update but with a non-trivial TDIMn
data = np.array([([['abc', 'def', 'geh'],
['ijk', 'lmn', 'opq']],)],
dtype=[('a', ('S3', (2, 3)))])
fits.writeto(self.temp('test2.fits'), data)
expected = [['abc', 'def', 'geh'],
['ijk', 'XYZ', 'opq']]
with fits.open(self.temp('test2.fits'), mode='update') as hdul:
assert hdul[1].header['TDIM1'] == '(3,3,2)'
# Note: Previously I wrote data['a'][0][1, 1] to address
# the single row. However, this is broken for chararray because
# data['a'][0] does *not* return a view of the original array--this
# is a bug in chararray though and not a bug in any FITS-specific
# code so we'll roll with it for now...
# (by the way the bug in question is fixed in newer Numpy versions)
hdul[1].data['a'][0, 1, 1] = 'XYZ'
assert np.all(hdul[1].data['a'][0] == expected)
with fits.open(self.temp('test2.fits')) as hdul:
assert hdul[1].header['TDIM1'] == '(3,3,2)'
assert np.all(hdul[1].data['a'][0] == expected)
@pytest.mark.skipif(str('not HAVE_OBJGRAPH'))
def test_reference_leak(self):
"""Regression test for https://github.com/astropy/astropy/pull/520"""
def readfile(filename):
with fits.open(filename) as hdul:
data = hdul[1].data.copy()
for colname in data.dtype.names:
data[colname]
with _refcounting('FITS_rec'):
readfile(self.data('memtest.fits'))
@pytest.mark.skipif(str('not HAVE_OBJGRAPH'))
def test_reference_leak2(self, tmpdir):
"""
Regression test for https://github.com/astropy/astropy/pull/4539
This actually re-runs a small set of tests that I found, during
careful testing, exhibited the reference leaks fixed by #4539, but
now with reference counting around each test to ensure that the
leaks are fixed.
"""
from .test_core import TestCore
from .test_connect import TestMultipleHDU
t1 = TestCore()
t1.setup()
try:
with _refcounting('FITS_rec'):
t1.test_add_del_columns2()
finally:
t1.teardown()
del t1
t2 = self.__class__()
for test_name in ['test_recarray_to_bintablehdu',
'test_numpy_ndarray_to_bintablehdu',
'test_new_table_from_recarray',
'test_new_fitsrec']:
t2.setup()
try:
with _refcounting('FITS_rec'):
getattr(t2, test_name)()
finally:
t2.teardown()
del t2
t3 = TestMultipleHDU()
t3.setup_class()
try:
with _refcounting('FITS_rec'):
t3.test_read(tmpdir)
finally:
t3.teardown_class()
del t3
def test_dump_clobber_vs_overwrite(self):
with fits.open(self.data('table.fits')) as hdul:
tbhdu = hdul[1]
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
tbhdu.dump(datafile, cdfile, hfile, overwrite=True)
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
tbhdu.dump(datafile, cdfile, hfile, clobber=True)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"clobber" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "overwrite" instead.')
def test_pseudo_unsigned_ints(self):
"""
Tests updating a table column containing pseudo-unsigned ints.
"""
data = np.array([1, 2, 3], dtype=np.uint32)
col = fits.Column(name='A', format='1J', bzero=2**31, array=data)
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp('test.fits'))
# Test that the file wrote out correctly
with fits.open(self.temp('test.fits'), uint=True) as hdul:
hdu = hdul[1]
assert 'TZERO1' in hdu.header
assert hdu.header['TZERO1'] == 2**31
assert hdu.data['A'].dtype == np.dtype('uint32')
assert np.all(hdu.data['A'] == data)
# Test updating the unsigned int data
hdu.data['A'][0] = 99
hdu.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits'), uint=True) as hdul:
hdu = hdul[1]
assert 'TZERO1' in hdu.header
assert hdu.header['TZERO1'] == 2**31
assert hdu.data['A'].dtype == np.dtype('uint32')
assert np.all(hdu.data['A'] == [99, 2, 3])
def test_column_with_scaling(self):
"""Check that a scaled column if correctly saved once it is modified.
Regression test for https://github.com/astropy/astropy/issues/6887
"""
c1 = fits.Column(name='c1', array=np.array([1], dtype='>i2'),
format='1I', bscale=1, bzero=32768)
S = fits.HDUList([fits.PrimaryHDU(),
fits.BinTableHDU.from_columns([c1])])
# Change value in memory
S[1].data['c1'][0] = 2
S.writeto(self.temp("a.fits"))
assert S[1].data['c1'] == 2
# Read and change value in memory
X = fits.open(self.temp("a.fits"))
X[1].data['c1'][0] = 10
assert X[1].data['c1'][0] == 10
# Write back to file
X.writeto(self.temp("b.fits"))
# Now check the file
with fits.open(self.temp("b.fits")) as hdul:
assert hdul[1].data['c1'][0] == 10
@contextlib.contextmanager
def _refcounting(type_):
"""
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement.
"""
gc.collect()
refcount = len(objgraph.by_type(type_))
yield refcount
gc.collect()
    assert len(objgraph.by_type(type_)) <= refcount, \
        "More {0!r} objects still in memory than before.".format(type_)
class TestVLATables(FitsTestCase):
"""Tests specific to tables containing variable-length arrays."""
def test_variable_length_columns(self):
def test(format_code):
col = fits.Column(name='QUAL_SPE', format=format_code,
array=[[0] * 1571] * 225)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
with ignore_warnings():
hdu_list.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as toto:
q = toto[1].data.field('QUAL_SPE')
assert (q[0][4:8] ==
np.array([0, 0, 0, 0], dtype=np.uint8)).all()
assert toto[1].columns[0].format.endswith('J(1571)')
for code in ('PJ()', 'QJ()'):
test(code)
def test_extend_variable_length_array(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54"""
def test(format_code):
arr = [[1] * 10] * 10
col1 = fits.Column(name='TESTVLF', format=format_code, array=arr)
col2 = fits.Column(name='TESTSCA', format='J', array=[1] * 10)
tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15)
# This asserts that the normal 'scalar' column's length was extended
assert len(tb_hdu.data['TESTSCA']) == 15
# And this asserts that the VLF column was extended in the same manner
assert len(tb_hdu.data['TESTVLF']) == 15
# We can't compare the whole array since the _VLF is an array of
# objects, but comparing just the edge case rows should suffice
assert (tb_hdu.data['TESTVLF'][0] == arr[0]).all()
assert (tb_hdu.data['TESTVLF'][9] == arr[9]).all()
assert (tb_hdu.data['TESTVLF'][10] == ([0] * 10)).all()
assert (tb_hdu.data['TESTVLF'][-1] == ([0] * 10)).all()
for code in ('PJ()', 'QJ()'):
test(code)
def test_variable_length_table_format_pd_from_object_array(self):
def test(format_code):
a = np.array([np.array([7.2e-20, 7.3e-20]), np.array([0.0]),
np.array([0.0])], 'O')
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
with ignore_warnings():
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith('D(2)')
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ('PD()', 'QD()'):
test(code)
def test_variable_length_table_format_pd_from_list(self):
def test(format_code):
a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]),
np.array([0.0])]
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
with ignore_warnings():
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith('D(2)')
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ('PD()', 'QD()'):
test(code)
def test_variable_length_table_format_pa_from_object_array(self):
def test(format_code):
a = np.array([np.array(['a', 'b', 'c']), np.array(['d', 'e']),
np.array(['f'])], 'O')
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
with ignore_warnings():
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].columns[0].format.endswith('A(3)')
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ('PA()', 'QA()'):
test(code)
def test_variable_length_table_format_pa_from_list(self):
def test(format_code):
a = ['a', 'ab', 'abc']
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
with ignore_warnings():
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].columns[0].format.endswith('A(3)')
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ('PA()', 'QA()'):
test(code)
def test_getdata_vla(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200"""
def test(format_code):
col = fits.Column(name='QUAL_SPE', format=format_code,
array=[np.arange(1572)] * 225)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
with ignore_warnings():
hdu_list.writeto(self.temp('toto.fits'), overwrite=True)
data = fits.getdata(self.temp('toto.fits'))
# Need to compare to the original data row by row since the FITS_rec
# returns an array of _VLA objects
for row_a, row_b in zip(data['QUAL_SPE'], col.array):
assert (row_a == row_b).all()
for code in ('PJ()', 'QJ()'):
test(code)
def test_copy_vla(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/47
"""
# Make a file containing a couple of VLA tables
arr1 = [np.arange(n + 1) for n in range(255)]
arr2 = [np.arange(255, 256 + n) for n in range(255)]
# A dummy non-VLA column needed to reproduce issue #47
c = fits.Column('test', format='J', array=np.arange(255))
c1 = fits.Column('A', format='PJ', array=arr1)
c2 = fits.Column('B', format='PJ', array=arr2)
t1 = fits.BinTableHDU.from_columns([c, c1])
t2 = fits.BinTableHDU.from_columns([c, c2])
hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2])
hdul.writeto(self.temp('test.fits'), overwrite=True)
# Just test that the test file wrote out correctly
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['TFORM2'] == 'PJ(255)'
assert h[2].header['TFORM2'] == 'PJ(255)'
assert comparerecords(h[1].data, t1.data)
assert comparerecords(h[2].data, t2.data)
# Try copying the second VLA and writing to a new file
with fits.open(self.temp('test.fits')) as h:
new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header)
new_hdu.writeto(self.temp('test3.fits'))
with fits.open(self.temp('test3.fits')) as h2:
assert comparerecords(h2[1].data, t2.data)
new_hdul = fits.HDUList([fits.PrimaryHDU()])
new_hdul.writeto(self.temp('test2.fits'))
# Open several copies of the test file and append copies of the second
# VLA table
with fits.open(self.temp('test2.fits'), mode='append') as new_hdul:
for _ in range(2):
with fits.open(self.temp('test.fits')) as h:
new_hdul.append(h[2])
new_hdul.flush()
# Test that all the VLA copies wrote correctly
with fits.open(self.temp('test2.fits')) as new_hdul:
for idx in range(1, 3):
assert comparerecords(new_hdul[idx].data, t2.data)
# These are tests that solely test the Column and ColDefs interfaces and
# related functionality without directly involving full tables; currently there
# are few of these but I expect there to be more as I improve the test coverage
class TestColumnFunctions(FitsTestCase):
def test_column_format_interpretation(self):
"""
Test to ensure that when Numpy-style record formats are passed in to
the Column constructor for the format argument, they are recognized so
        long as they are unambiguous (where "unambiguous" here is questionable,
        since Numpy is case-insensitive when parsing the format codes; but
        their "proper" case is lower-case, so we can accept that).  Basically,
        any key in the NUMPY2FITS dict should be accepted.
"""
for recformat, fitsformat in NUMPY2FITS.items():
            c = fits.Column('TEST', np.dtype(recformat))
            assert c.format == fitsformat
            c = fits.Column('TEST', recformat)
            assert c.format == fitsformat
            c = fits.Column('TEST', fitsformat)
            assert c.format == fitsformat
# Test a few cases that are ambiguous in that they *are* valid binary
# table formats though not ones that are likely to be used, but are
# also valid common ASCII table formats
c = fits.Column('TEST', 'I4')
assert c.format == 'I4'
assert c.format.format == 'I'
assert c.format.width == 4
c = fits.Column('TEST', 'F15.8')
assert c.format == 'F15.8'
assert c.format.format == 'F'
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column('TEST', 'E15.8')
assert c.format.format == 'E'
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column('TEST', 'D15.8')
assert c.format.format == 'D'
assert c.format.width == 15
assert c.format.precision == 8
# zero-precision should be allowed as well, for float types
# https://github.com/astropy/astropy/issues/3422
c = fits.Column('TEST', 'F10.0')
assert c.format.format == 'F'
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column('TEST', 'E10.0')
assert c.format.format == 'E'
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column('TEST', 'D10.0')
assert c.format.format == 'D'
assert c.format.width == 10
assert c.format.precision == 0
# These are a couple cases where the format code is a valid binary
# table format, and is not strictly a valid ASCII table format but
# could be *interpreted* as one by appending a default width. This
# will only happen either when creating an ASCII table or when
# explicitly specifying ascii=True when the column is created
c = fits.Column('TEST', 'I')
assert c.format == 'I'
assert c.format.recformat == 'i2'
c = fits.Column('TEST', 'I', ascii=True)
assert c.format == 'I10'
c = fits.Column('TEST', 'E')
assert c.format == 'E'
assert c.format.recformat == 'f4'
c = fits.Column('TEST', 'E', ascii=True)
assert c.format == 'E15.7'
# F is not a valid binary table format so it should be unambiguously
# treated as an ASCII column
c = fits.Column('TEST', 'F')
assert c.format == 'F16.7'
c = fits.Column('TEST', 'D')
assert c.format == 'D'
assert c.format.recformat == 'f8'
c = fits.Column('TEST', 'D', ascii=True)
assert c.format == 'D25.17'
def test_zero_precision_float_column(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3422
"""
c = fits.Column('TEST', 'F5.0', array=[1.1, 2.2, 3.3])
# The decimal places will be clipped
t = fits.TableHDU.from_columns([c])
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].header['TFORM1'] == 'F5.0'
assert hdul[1].data['TEST'].dtype == np.dtype('float64')
assert np.all(hdul[1].data['TEST'] == [1.0, 2.0, 3.0])
# Check how the raw data looks
raw = np.rec.recarray.field(hdul[1].data, 'TEST')
            assert raw.tostring() == b'   1.   2.   3.'
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column('mag', format='E', array=arr)
assert (arr == col.array).all()
    def test_new_coldefs_with_invalid_sequence(self):
"""Test that a TypeError is raised when a ColDefs is instantiated with
a sequence of non-Column objects.
"""
pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data('tb.fits')) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data('ascii.fits')) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data('random_groups.fits')) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data('zerowidth.fits')) as zwc:
            # Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
assert comparerecords(zwc_pl, zwc[2].data)
def test_column_lookup_by_name(self):
"""Tests that a `ColDefs` can be indexed by column name."""
a = fits.Column(name='a', format='D')
b = fits.Column(name='b', format='D')
cols = fits.ColDefs([a, b])
assert cols['a'] == cols[0]
assert cols['b'] == cols[1]
def test_column_attribute_change_after_removal(self):
"""
This is a test of the column attribute change notification system.
After a column has been removed from a table (but other references
are kept to that same column) changes to that column's attributes
should not trigger a notification on the table it was removed from.
"""
# One way we can check this is to ensure there are no further changes
# to the header
table = fits.BinTableHDU.from_columns([
fits.Column('a', format='D'),
fits.Column('b', format='D')])
b = table.columns['b']
table.columns.del_col('b')
assert table.data.dtype.names == ('a',)
b.name = 'HELLO'
assert b.name == 'HELLO'
assert 'TTYPE2' not in table.header
assert table.header['TTYPE1'] == 'a'
assert table.columns.names == ['a']
with pytest.raises(KeyError):
table.columns['b']
# Make sure updates to the remaining column still work
table.columns.change_name('a', 'GOODBYE')
with pytest.raises(KeyError):
table.columns['a']
assert table.columns['GOODBYE'].name == 'GOODBYE'
assert table.data.dtype.names == ('GOODBYE',)
assert table.columns.names == ['GOODBYE']
assert table.data.columns.names == ['GOODBYE']
table.columns['GOODBYE'].name = 'foo'
with pytest.raises(KeyError):
table.columns['GOODBYE']
assert table.columns['foo'].name == 'foo'
assert table.data.dtype.names == ('foo',)
assert table.columns.names == ['foo']
assert table.data.columns.names == ['foo']
def test_x_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the X (bit array) format can be deep-copied.
"""
c = fits.Column('xcol', format='5X', array=[1, 0, 0, 1, 0])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array == c.array)
def test_p_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the P/Q formats (variable length arrays) can be
deep-copied.
"""
c = fits.Column('pcol', format='PJ', array=[[1, 2], [3, 4, 5]])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array[0] == c.array[0])
assert np.all(c2.array[1] == c.array[1])
c3 = fits.Column('qcol', format='QJ', array=[[1, 2], [3, 4, 5]])
c4 = copy.deepcopy(c3)
assert c4.name == c3.name
assert c4.format == c3.format
assert np.all(c4.array[0] == c3.array[0])
assert np.all(c4.array[1] == c3.array[1])
def test_column_verify_keywords(self):
"""
Test that the keyword arguments used to initialize a Column, specifically
        those that are typically read from a FITS header (so excluding array),
are verified to have a valid value.
"""
with pytest.raises(AssertionError) as err:
c = fits.Column(1, format='I', array=[1, 2, 3, 4, 5])
assert 'Column name must be a string able to fit' in str(err.value)
with pytest.raises(VerifyError) as err:
c = fits.Column('col', format='I', null='Nan', disp=1, coord_type=1,
coord_unit=2, coord_ref_point='1', coord_ref_value='1',
coord_inc='1', time_ref_pos=1)
err_msgs = ['keyword arguments to Column were invalid', 'TNULL', 'TDISP',
'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS']
for msg in err_msgs:
assert msg in str(err.value)
def test_column_verify_start(self):
"""
Regression test for https://github.com/astropy/astropy/pull/6359
Test the validation of the column start position option (ASCII table only),
corresponding to ``TBCOL`` keyword.
Test whether the VerifyError message generated is the one with highest priority,
i.e. the order of error messages to be displayed is maintained.
"""
with pytest.raises(VerifyError) as err:
c = fits.Column('a', format='B', start='a', array=[1, 2, 3])
assert "start option (TBCOLn) is not allowed for binary table columns" in str(err.value)
with pytest.raises(VerifyError) as err:
c = fits.Column('a', format='I', start='a', array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(err.value)
with pytest.raises(VerifyError) as err:
c = fits.Column('a', format='I', start='-56', array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got -56)." in str(err.value)
def test_regression_5383():
# Regression test for an undefined variable
x = np.array([1, 2, 3])
col = fits.Column(name='a', array=x, format='E')
hdu = fits.BinTableHDU.from_columns([col])
del hdu._header['TTYPE1']
hdu.columns[0].name = 'b'
def test_table_to_hdu():
from ....table import Table
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table['a'].unit = 'm/s'
table['b'].unit = 'not-a-unit'
table.meta['foo'] = 'bar'
with catch_warnings() as w:
hdu = fits.BinTableHDU(table, header=fits.Header({'TEST': 1}))
assert len(w) == 1
assert str(w[0].message).startswith("'not-a-unit' did not parse as"
" fits unit")
for name in 'abc':
assert np.array_equal(table[name], hdu.data[name])
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2')
assert hdu.header['FOO'] == 'bar'
assert hdu.header['TEST'] == 1
def test_regression_scalar_indexing():
# Indexing a FITS_rec with a tuple that returns a scalar record
# should work
x = np.array([(1.0, 2), (3.0, 4)],
dtype=[('x', float), ('y', int)]).view(fits.FITS_rec)
x1a = x[1]
# this should succeed.
x1b = x[(1,)]
    # FITS_record does not define __eq__, so compare the elements.
assert all(a == b for a, b in zip(x1a, x1b))
def test_new_column_attributes_preserved(tmpdir):
# Regression test for https://github.com/astropy/astropy/issues/7145
# This makes sure that for now we don't clear away keywords that have
# newly been recognized (in Astropy 3.0) as special column attributes but
# instead just warn that we might do so in future. The new keywords are:
# TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS
col = []
col.append(fits.Column(name="TIME", format="1E", unit="s"))
col.append(fits.Column(name="RAWX", format="1I", unit="pixel"))
col.append(fits.Column(name="RAWY", format="1I"))
cd = fits.ColDefs(col)
hdr = fits.Header()
# Keywords that will get ignored in favor of these in the data
hdr['TUNIT1'] = 'pixel'
hdr['TUNIT2'] = 'm'
hdr['TUNIT3'] = 'm'
# Keywords that were added in Astropy 3.0 that should eventually be
# ignored and set on the data instead
hdr['TCTYP2'] = 'RA---TAN'
hdr['TCTYP3'] = 'ANGLE'
hdr['TCRVL2'] = -999.0
hdr['TCRVL3'] = -999.0
hdr['TCRPX2'] = 1.0
hdr['TCRPX3'] = 1.0
hdr['TALEN2'] = 16384
hdr['TALEN3'] = 1024
hdr['TCUNI2'] = 'angstrom'
hdr['TCUNI3'] = 'deg'
# Other non-relevant keywords
hdr['RA'] = 1.5
hdr['DEC'] = 3.0
with pytest.warns(AstropyDeprecationWarning) as warning_list:
hdu = fits.BinTableHDU.from_columns(cd, hdr)
assert str(warning_list[0].message).startswith("The following keywords are now recognized as special")
# First, check that special keywords such as TUNIT are ignored in the header
# We may want to change that behavior in future, but this is the way it's
# been for a while now.
assert hdu.columns[0].unit == 's'
assert hdu.columns[1].unit == 'pixel'
assert hdu.columns[2].unit is None
assert hdu.header['TUNIT1'] == 's'
assert hdu.header['TUNIT2'] == 'pixel'
assert 'TUNIT3' not in hdu.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu.columns[0].coord_type is None
assert hdu.columns[1].coord_type is None
assert hdu.columns[2].coord_type is None
assert 'TCTYP1' not in hdu.header
assert hdu.header['TCTYP2'] == 'RA---TAN'
assert hdu.header['TCTYP3'] == 'ANGLE'
# Make sure that other keywords are still there
assert hdu.header['RA'] == 1.5
assert hdu.header['DEC'] == 3.0
    # Now we can write this HDU to a file and re-load.  Re-loading *should*
    # cause the special column attributes to be picked up (it's just that
    # when a header is manually specified, these values are ignored)
filename = tmpdir.join('test.fits').strpath
hdu.writeto(filename)
# Make sure we don't emit a warning in this case
with pytest.warns(None) as warning_list:
hdu2 = fits.open(filename)[1]
assert len(warning_list) == 0
# Check that column attributes are now correctly set
assert hdu2.columns[0].unit == 's'
assert hdu2.columns[1].unit == 'pixel'
assert hdu2.columns[2].unit is None
assert hdu2.header['TUNIT1'] == 's'
assert hdu2.header['TUNIT2'] == 'pixel'
assert 'TUNIT3' not in hdu2.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu2.columns[0].coord_type is None
assert hdu2.columns[1].coord_type == 'RA---TAN'
assert hdu2.columns[2].coord_type == 'ANGLE'
assert 'TCTYP1' not in hdu2.header
assert hdu2.header['TCTYP2'] == 'RA---TAN'
assert hdu2.header['TCTYP3'] == 'ANGLE'
# Make sure that other keywords are still there
assert hdu2.header['RA'] == 1.5
assert hdu2.header['DEC'] == 3.0
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import glob
import io
import os
import platform
import sys
import copy
import pytest
import numpy as np
from ..verify import VerifyError
from ....io import fits
from ....tests.helper import raises, catch_warnings, ignore_warnings
from ....utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning
from ....utils.compat import NUMPY_LT_1_12
from . import FitsTestCase
class TestHDUListFunctions(FitsTestCase):
def test_update_name(self):
hdul = fits.open(self.data('o4sp040b0_raw.fits'))
hdul[4].name = 'Jim'
hdul[4].ver = 9
assert hdul[('JIM', 9)].header['extname'] == 'JIM'
def test_hdu_file_bytes(self):
hdul = fits.open(self.data('checksum.fits'))
res = hdul[0].filebytes()
assert res == 11520
res = hdul[1].filebytes()
assert res == 8640
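        # Sanity check on the numbers (editorial note): FITS files are laid
        # out in 2880-byte blocks, so 11520 bytes is exactly four blocks and
        # 8640 exactly three.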
def test_hdulist_file_info(self):
hdul = fits.open(self.data('checksum.fits'))
res = hdul.fileinfo(0)
def test_fileinfo(**kwargs):
assert res['datSpan'] == kwargs.get('datSpan', 2880)
assert res['resized'] == kwargs.get('resized', False)
assert res['filename'] == self.data('checksum.fits')
assert res['datLoc'] == kwargs.get('datLoc', 8640)
assert res['hdrLoc'] == kwargs.get('hdrLoc', 0)
assert res['filemode'] == 'readonly'
res = hdul.fileinfo(1)
test_fileinfo(datLoc=17280, hdrLoc=11520)
hdu = fits.ImageHDU(data=hdul[0].data)
hdul.insert(1, hdu)
res = hdul.fileinfo(0)
test_fileinfo(resized=True)
res = hdul.fileinfo(1)
test_fileinfo(datSpan=None, resized=True, datLoc=None, hdrLoc=None)
res = hdul.fileinfo(2)
test_fileinfo(resized=1, datLoc=17280, hdrLoc=11520)
def test_create_from_multiple_primary(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/145
Ensure that a validation error occurs when saving an HDUList containing
multiple PrimaryHDUs.
"""
hdul = fits.HDUList([fits.PrimaryHDU(), fits.PrimaryHDU()])
pytest.raises(VerifyError, hdul.writeto, self.temp('temp.fits'),
output_verify='exception')
def test_append_primary_to_empty_list(self):
        """Tests appending a Simple PrimaryHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_empty_list(self):
"""Tests appending a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_table_extension_to_empty_list(self):
"""Tests appending a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
hdul1 = fits.open(self.data('tb.fits'))
hdul.append(hdul1[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_groupshdu_to_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_primary_to_non_empty_list(self):
"""Tests appending a Simple PrimaryHDU to a non-empty HDUList."""
hdul = fits.open(self.data('arange.fits'))
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_non_empty_list(self):
"""Tests appending a Simple ExtensionHDU to a non-empty HDUList."""
hdul = fits.open(self.data('tb.fits'))
hdul.append(hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
@raises(ValueError)
def test_append_groupshdu_to_non_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
hdu = fits.GroupsHDU()
hdul.append(hdu)
def test_insert_primary_to_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_empty_list(self):
"""Tests inserting a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_table_extension_to_empty_list(self):
"""Tests inserting a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
hdul1 = fits.open(self.data('tb.fits'))
hdul.insert(0, hdul1[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_primary_to_non_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to a non-empty HDUList."""
hdul = fits.open(self.data('arange.fits'))
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_non_empty_list(self):
"""Tests inserting a Simple ExtensionHDU to a non-empty HDUList."""
hdul = fits.open(self.data('tb.fits'))
hdul.insert(1, hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_non_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
hdu = fits.GroupsHDU()
with pytest.raises(ValueError):
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters'),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
hdul.insert(0, hdu)
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
@raises(ValueError)
def test_insert_groupshdu_to_begin_of_hdulist_with_groupshdu(self):
"""
Tests inserting a Simple GroupsHDU to the beginning of an HDUList
        that already contains a GroupsHDU.
"""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
hdul.insert(0, hdu)
def test_insert_extension_to_primary_in_non_empty_list(self):
        """Tests inserting a Simple ExtensionHDU to a non-empty HDUList."""
hdul = fits.open(self.data('tb.fits'))
hdul.insert(0, hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'ImageHDU', 12, (), '', ''),
(3, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_image_extension_to_primary_in_non_empty_list(self):
"""
Tests inserting a Simple Image ExtensionHDU to a non-empty HDUList
as the primary HDU.
"""
hdul = fits.open(self.data('tb.fits'))
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', ''),
(1, '', 1, 'ImageHDU', 12, (), '', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_filename(self):
"""Tests the HDUList filename method."""
hdul = fits.open(self.data('tb.fits'))
name = hdul.filename()
assert name == self.data('tb.fits')
def test_file_like(self):
"""
        Tests the use of a file-like object with no tell or seek methods
        in HDUList.writeto(), HDUList.flush(), and astropy.io.fits.writeto()
"""
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul = fits.HDUList()
hdul.append(hdu)
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul.writeto(tmpfile)
tmpfile.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_2(self):
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul = fits.open(tmpfile, mode='ostream')
hdul.append(hdu)
hdul.flush()
tmpfile.close()
hdul.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_3(self):
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
fits.writeto(tmpfile, np.arange(100, dtype=np.int32))
tmpfile.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_shallow_copy(self):
"""
Tests that `HDUList.__copy__()` and `HDUList.copy()` return a
shallow copy (regression test for #7211).
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
for hdulcopy in (hdul.copy(), copy.copy(hdul)):
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
assert hdulcopy[0] is hdul[0]
assert hdulcopy[1] is hdul[1]
def test_deep_copy(self):
"""
Tests that `HDUList.__deepcopy__()` returns a deep copy.
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
hdulcopy = copy.deepcopy(hdul)
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
for index in range(len(hdul)):
assert hdulcopy[index] is not hdul[index]
assert hdulcopy[index].header == hdul[index].header
np.testing.assert_array_equal(hdulcopy[index].data, hdul[index].data)
def test_new_hdu_extname(self):
"""
Tests that new extension HDUs that are added to an HDUList can be
properly indexed by their EXTNAME/EXTVER (regression test for
ticket:48).
"""
f = fits.open(self.data('test0.fits'))
hdul = fits.HDUList()
hdul.append(f[0].copy())
hdul.append(fits.ImageHDU(header=f[1].header))
assert hdul[1].header['EXTNAME'] == 'SCI'
assert hdul[1].header['EXTVER'] == 1
assert hdul.index_of(('SCI', 1)) == 1
def test_update_filelike(self):
"""Test opening a file-like object in update mode and resizing the
HDU.
"""
sf = io.BytesIO()
arr = np.zeros((100, 100))
hdu = fits.PrimaryHDU(data=arr)
hdu.writeto(sf)
sf.seek(0)
arr = np.zeros((200, 200))
hdul = fits.open(sf, mode='update')
hdul[0].data = arr
hdul.flush()
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_flush_readonly(self):
"""Test flushing changes to a file opened in a read only mode."""
oldmtime = os.stat(self.data('test0.fits')).st_mtime
hdul = fits.open(self.data('test0.fits'))
hdul[0].header['FOO'] = 'BAR'
with catch_warnings(AstropyUserWarning) as w:
hdul.flush()
assert len(w) == 1
assert 'mode is not supported' in str(w[0].message)
assert oldmtime == os.stat(self.data('test0.fits')).st_mtime
def test_fix_extend_keyword(self):
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
hdul.append(fits.ImageHDU())
del hdul[0].header['EXTEND']
hdul.verify('silentfix')
assert 'EXTEND' in hdul[0].header
assert hdul[0].header['EXTEND'] is True
def test_fix_malformed_naxisj(self):
"""
Tests that malformed NAXISj values are fixed sensibly.
"""
hdu = fits.open(self.data('arange.fits'))
# Malform NAXISj header data
hdu[0].header['NAXIS1'] = 11.0
hdu[0].header['NAXIS2'] = '10.0'
hdu[0].header['NAXIS3'] = '7'
# Axes cache needs to be malformed as well
hdu[0]._axes = [11.0, '10.0', '7']
# Perform verification including the fix
hdu.verify('silentfix')
# Check that malformed data was converted
assert hdu[0].header['NAXIS1'] == 11
assert hdu[0].header['NAXIS2'] == 10
assert hdu[0].header['NAXIS3'] == 7
def test_fix_wellformed_naxisj(self):
"""
Tests that wellformed NAXISj values are not modified.
"""
hdu = fits.open(self.data('arange.fits'))
# Fake new NAXISj header data
hdu[0].header['NAXIS1'] = 768
hdu[0].header['NAXIS2'] = 64
hdu[0].header['NAXIS3'] = 8
# Axes cache needs to be faked as well
hdu[0]._axes = [768, 64, 8]
# Perform verification including the fix
hdu.verify('silentfix')
# Check that malformed data was converted
assert hdu[0].header['NAXIS1'] == 768
assert hdu[0].header['NAXIS2'] == 64
assert hdu[0].header['NAXIS3'] == 8
def test_new_hdulist_extend_keyword(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/114
Tests that adding a PrimaryHDU to a new HDUList object updates the
EXTEND keyword on that HDU.
"""
h0 = fits.Header()
hdu = fits.PrimaryHDU(header=h0)
sci = fits.ImageHDU(data=np.array(10))
image = fits.HDUList([hdu, sci])
image.writeto(self.temp('temp.fits'))
assert 'EXTEND' in hdu.header
assert hdu.header['EXTEND'] is True
def test_replace_memmaped_array(self):
# Copy the original before we modify it
hdul = fits.open(self.data('test0.fits'))
hdul.writeto(self.temp('temp.fits'))
hdul = fits.open(self.temp('temp.fits'), mode='update', memmap=True)
old_data = hdul[1].data.copy()
hdul[1].data = hdul[1].data + 1
hdul.close()
hdul = fits.open(self.temp('temp.fits'), memmap=True)
assert ((old_data + 1) == hdul[1].data).all()
def test_open_file_with_end_padding(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/106
Open files with end padding bytes.
"""
hdul = fits.open(self.data('test0.fits'),
do_not_scale_image_data=True)
info = hdul.info(output=False)
hdul.writeto(self.temp('temp.fits'))
with open(self.temp('temp.fits'), 'ab') as f:
f.seek(0, os.SEEK_END)
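            # Append exactly one extra 2880-byte FITS block of null padding.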
f.write(b'\0' * 2880)
with ignore_warnings():
assert info == fits.info(self.temp('temp.fits'), output=False,
do_not_scale_image_data=True)
def test_open_file_with_bad_header_padding(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/136
Open files with nulls for header block padding instead of spaces.
"""
a = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=a)
hdu.writeto(self.temp('temp.fits'))
# Figure out where the header padding begins and fill it with nulls
end_card_pos = str(hdu.header).index('END' + ' ' * 77)
padding_start = end_card_pos + 80
padding_len = 2880 - padding_start
with open(self.temp('temp.fits'), 'r+b') as f:
f.seek(padding_start)
f.write('\0'.encode('ascii') * padding_len)
with catch_warnings(AstropyUserWarning) as w:
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == a).all()
assert ('contains null bytes instead of spaces' in
str(w[0].message))
assert len(w) == 1
assert len(hdul) == 1
assert str(hdul[0].header) == str(hdu.header)
def test_update_with_truncated_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148
Test that saving an update where the header is shorter than the
original header doesn't leave a stump from the old header in the file.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
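        # A FITS header block holds 36 80-character cards (2880 bytes);
        # padding the header near that limit exercises the truncation path
        # when the rewritten header comes out shorter than the original.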
while len(hdu.header) < 34:
hdu.header['TEST{}'.format(idx)] = idx
idx += 1
hdu.writeto(self.temp('temp.fits'), checksum=True)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
# Modify the header, forcing it to be rewritten
hdul[0].header['TEST1'] = 2
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data).all()
@pytest.mark.xfail(platform.system() == 'Windows' and not NUMPY_LT_1_12,
reason='https://github.com/astropy/astropy/issues/5797')
def test_update_resized_header(self):
"""
Test saving updates to a file where the header is one block smaller
        than before, and in the case where the header is one block larger
        than before.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
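        # Each card is 80 characters and a header block is 2880 bytes; grow
        # the header just past one block so the deletions below can shrink
        # it back to a single block.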
while len(str(hdu.header)) <= 2880:
hdu.header['TEST{}'.format(idx)] = idx
idx += 1
orig_header = hdu.header.copy()
hdu.writeto(self.temp('temp.fits'))
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
while len(str(hdul[0].header)) > 2880:
del hdul[0].header[-1]
with fits.open(self.temp('temp.fits')) as hdul:
assert hdul[0].header == orig_header[:-1]
assert (hdul[0].data == data).all()
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
idx = 101
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header['TEST{}'.format(idx)] = idx
idx += 1
# Touch something in the data too so that it has to be rewritten
hdul[0].data[0] = 27
with fits.open(self.temp('temp.fits')) as hdul:
assert hdul[0].header[:-37] == orig_header[:-1]
assert hdul[0].data[0] == 27
assert (hdul[0].data[1:] == data[1:]).all()
def test_update_resized_header2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/150
This is similar to test_update_resized_header, but specifically tests a
case of multiple consecutive flush() calls on the same HDUList object,
where each flush() requires a resize.
"""
data1 = np.arange(100)
data2 = np.arange(100) + 100
phdu = fits.PrimaryHDU(data=data1)
hdu = fits.ImageHDU(data=data2)
phdu.writeto(self.temp('temp.fits'))
with fits.open(self.temp('temp.fits'), mode='append') as hdul:
hdul.append(hdu)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
idx = 1
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header['TEST{}'.format(idx)] = idx
idx += 1
hdul.flush()
hdul.append(hdu)
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data1).all()
assert hdul[1].header == hdu.header
assert (hdul[1].data == data2).all()
assert (hdul[2].data == data2).all()
@ignore_warnings()
def test_hdul_fromstring(self):
"""
Test creating the HDUList structure in memory from a string containing
an entire FITS file. This is similar to test_hdu_fromstring but for an
entire multi-extension FITS file at once.
"""
# Tests HDUList.fromstring for all of Astropy's built in test files
def test_fromstring(filename):
with fits.open(filename) as hdul:
orig_info = hdul.info(output=False)
with open(filename, 'rb') as f:
dat = f.read()
hdul2 = fits.HDUList.fromstring(dat)
assert orig_info == hdul2.info(output=False)
for idx in range(len(hdul)):
assert hdul[idx].header == hdul2[idx].header
if hdul[idx].data is None or hdul2[idx].data is None:
assert hdul[idx].data == hdul2[idx].data
elif (hdul[idx].data.dtype.fields and
hdul2[idx].data.dtype.fields):
# Compare tables
for n in hdul[idx].data.names:
c1 = hdul[idx].data[n]
c2 = hdul2[idx].data[n]
assert (c1 == c2).all()
elif (any(dim == 0 for dim in hdul[idx].data.shape) or
any(dim == 0 for dim in hdul2[idx].data.shape)):
# For some reason some combinations of Python and Numpy
# on Windows result in MemoryErrors when trying to work
# on memmap arrays with more than one dimension but
# some dimensions of size zero, so include a special
# case for that
                    assert hdul[idx].data.shape == hdul2[idx].data.shape
                    return
else:
np.testing.assert_array_equal(hdul[idx].data,
hdul2[idx].data)
for filename in glob.glob(os.path.join(self.data_dir, '*.fits')):
if sys.platform == 'win32' and filename == 'zerowidth.fits':
# Running this test on this file causes a crash in some
# versions of Numpy on Windows. See ticket:
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/174
continue
elif filename.endswith('variable_length_table.fits'):
# Comparing variable length arrays is non-trivial and thus
# skipped at this point.
# TODO: That's probably possible, so one could make it work.
continue
test_fromstring(filename)
# Test that creating an HDUList from something silly raises a TypeError
pytest.raises(TypeError, fits.HDUList.fromstring, ['a', 'b', 'c'])
def test_save_backup(self):
"""Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/121
Save backup of file before flushing changes.
"""
self.copy_file('scale.fits')
with ignore_warnings():
with fits.open(self.temp('scale.fits'), mode='update',
save_backup=True) as hdul:
# Make some changes to the original file to force its header
# and data to be rewritten
hdul[0].header['TEST'] = 'TEST'
hdul[0].data[0] = 0
assert os.path.exists(self.temp('scale.fits.bak'))
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul1:
with fits.open(self.temp('scale.fits.bak'),
do_not_scale_image_data=True) as hdul2:
assert hdul1[0].header == hdul2[0].header
assert (hdul1[0].data == hdul2[0].data).all()
with ignore_warnings():
with fits.open(self.temp('scale.fits'), mode='update',
save_backup=True) as hdul:
# One more time to see if multiple backups are made
hdul[0].header['TEST2'] = 'TEST'
hdul[0].data[0] = 1
assert os.path.exists(self.temp('scale.fits.bak'))
assert os.path.exists(self.temp('scale.fits.bak.1'))
def test_replace_mmap_data(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
hdu_a = fits.PrimaryHDU(data=arr_a)
hdu_a.writeto(self.temp('test_a.fits'), overwrite=True)
hdu_b = fits.PrimaryHDU(data=arr_b)
hdu_b.writeto(self.temp('test_b.fits'), overwrite=True)
hdul_a = fits.open(self.temp('test_a.fits'), mode='update',
memmap=mmap_a)
hdul_b = fits.open(self.temp('test_b.fits'), memmap=mmap_b)
hdul_a[0].data = hdul_b[0].data
hdul_a.close()
hdul_b.close()
hdul_a = fits.open(self.temp('test_a.fits'))
assert np.all(hdul_a[0].data == arr_b)
with ignore_warnings():
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
# Finally, without mmaping B
test(True, False)
def test_replace_mmap_data_2(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work. Like test_replace_mmap_data but with
table data instead of image data.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
col_a = fits.Column(name='a', format='J', array=arr_a)
col_b = fits.Column(name='b', format='J', array=arr_b)
hdu_a = fits.BinTableHDU.from_columns([col_a])
hdu_a.writeto(self.temp('test_a.fits'), overwrite=True)
hdu_b = fits.BinTableHDU.from_columns([col_b])
hdu_b.writeto(self.temp('test_b.fits'), overwrite=True)
hdul_a = fits.open(self.temp('test_a.fits'), mode='update',
memmap=mmap_a)
hdul_b = fits.open(self.temp('test_b.fits'), memmap=mmap_b)
hdul_a[1].data = hdul_b[1].data
hdul_a.close()
hdul_b.close()
hdul_a = fits.open(self.temp('test_a.fits'))
assert 'b' in hdul_a[1].columns.names
assert 'a' not in hdul_a[1].columns.names
assert np.all(hdul_a[1].data['b'] == arr_b)
with ignore_warnings():
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
# Finally, without mmaping B
test(True, False)
def test_extname_in_hdulist(self):
"""
Tests to make sure that the 'in' operator works.
Regression test for https://github.com/astropy/astropy/issues/3060
"""
hdulist = fits.HDUList()
hdulist.append(fits.ImageHDU(name='a'))
assert 'a' in hdulist
assert 'A' in hdulist
assert ('a', 1) in hdulist
assert ('A', 1) in hdulist
assert 'b' not in hdulist
assert ('a', 2) not in hdulist
assert ('b', 1) not in hdulist
assert ('b', 2) not in hdulist
def test_overwrite_vs_clobber(self):
hdulist = fits.HDUList([fits.PrimaryHDU()])
hdulist.writeto(self.temp('test_overwrite.fits'))
hdulist.writeto(self.temp('test_overwrite.fits'), overwrite=True)
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
hdulist.writeto(self.temp('test_overwrite.fits'), clobber=True)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"clobber" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "overwrite" instead.')
def test_invalid_hdu_key_in_contains(self):
"""
Make sure invalid keys in the 'in' operator return False.
Regression test for https://github.com/astropy/astropy/issues/5583
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU())
hdulist.append(fits.ImageHDU())
# A more or less random assortment of things which are not valid keys.
bad_keys = [None, 3.5, {}]
for key in bad_keys:
assert not (key in hdulist)
def test_iteration_of_lazy_loaded_hdulist(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5585
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU(name='SCI'))
hdulist.append(fits.ImageHDU(name='SCI'))
hdulist.append(fits.ImageHDU(name='nada'))
hdulist.append(fits.ImageHDU(name='SCI'))
filename = self.temp('many_extension.fits')
hdulist.writeto(filename)
f = fits.open(filename)
# Check that all extensions are read if f is not sliced
all_exts = [ext for ext in f]
assert len(all_exts) == 5
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Try a simple slice with no conditional on the ext. This is essentially
# the reported failure.
all_exts_but_zero = [ext for ext in f[1:]]
assert len(all_exts_but_zero) == 4
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Check whether behavior is proper if the upper end of the slice is not
# omitted.
read_exts = [ext for ext in f[1:4] if ext.header['EXTNAME'] == 'SCI']
assert len(read_exts) == 2
def test_proper_error_raised_on_non_fits_file_with_unicode(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5594
The failure shows up when (in python 3+) you try to open a file
with unicode content that is not actually a FITS file. See:
https://github.com/astropy/astropy/issues/5594#issuecomment-266583218
"""
import codecs
filename = self.temp('not-fits-with-unicode.fits')
        with codecs.open(filename, mode='w', encoding='utf-8') as f:
f.write(u'Ce\xe7i ne marche pas')
# This should raise an OSError because there is no end card.
with pytest.raises(OSError):
fits.open(filename)
def test_no_resource_warning_raised_on_non_fits_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6168
The ResourceWarning shows up when (in python 3+) you try to
open a non-FITS file when using a filename.
"""
# To avoid creating the file multiple times the tests are
        # all included in one test function. See the discussion in the
# PR at https://github.com/astropy/astropy/issues/6168
#
filename = self.temp('not-fits.fits')
with open(filename, mode='w') as f:
f.write('# header line\n')
f.write('0.1 0.2\n')
# Opening the file should raise an OSError however the file
# is opened (there are two distinct code paths, depending on
# whether ignore_missing_end is True or False).
#
# Explicit tests are added to make sure the file handle is not
# closed when passed in to fits.open. In this case the ResourceWarning
# was not raised, but a check is still included.
#
with catch_warnings(ResourceWarning) as ws:
# Make sure that files opened by the user are not closed
with open(filename, mode='rb') as f:
with pytest.raises(OSError):
fits.open(f, ignore_missing_end=False)
assert not f.closed
with open(filename, mode='rb') as f:
with pytest.raises(OSError):
fits.open(f, ignore_missing_end=True)
assert not f.closed
with pytest.raises(OSError):
fits.open(filename, ignore_missing_end=False)
with pytest.raises(OSError):
fits.open(filename, ignore_missing_end=True)
assert len(ws) == 0
def test_pop_with_lazy_load(self):
filename = self.data('checksum.fits')
hdul = fits.open(filename)
# Try popping the hdulist before doing anything else. This makes sure
# that https://github.com/astropy/astropy/issues/7185 is fixed.
hdu = hdul.pop()
assert len(hdul) == 1
# Read the file again and try popping from the beginning
hdul2 = fits.open(filename)
hdu2 = hdul2.pop(0)
assert len(hdul2) == 1
# Just a sanity check
hdul3 = fits.open(filename)
assert len(hdul3) == 2
assert hdul3[0].header == hdu2.header
assert hdul3[1].header == hdu.header
def test_pop_extname(self):
hdul = fits.open(self.data('o4sp040b0_raw.fits'))
assert len(hdul) == 7
hdu1 = hdul[1]
hdu4 = hdul[4]
hdu_popped = hdul.pop(('SCI', 2))
assert len(hdul) == 6
assert hdu_popped is hdu4
hdu_popped = hdul.pop('SCI')
assert len(hdul) == 5
assert hdu_popped is hdu1
|
f614010b5433cac4549da89ca10afe22312418b66bf0d424ea40ea35315660c1 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import os
import shutil
import warnings
import pytest
import numpy as np
from ....io import fits
from ....table import Table
from .. import printdiff
from ....tests.helper import catch_warnings
from . import FitsTestCase
class TestConvenience(FitsTestCase):
def test_resource_warning(self):
warnings.simplefilter('always', ResourceWarning)
with catch_warnings() as w:
data = fits.getdata(self.data('test0.fits'))
assert len(w) == 0
with catch_warnings() as w:
header = fits.getheader(self.data('test0.fits'))
assert len(w) == 0
def test_fileobj_not_closed(self):
"""
Tests that file-like objects are not closed after being passed
to convenience functions.
Regression test for https://github.com/astropy/astropy/issues/5063
"""
f = open(self.data('test0.fits'), 'rb')
data = fits.getdata(f)
assert not f.closed
f.seek(0)
header = fits.getheader(f)
assert not f.closed
def test_table_to_hdu(self):
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table['a'].unit = 'm/s'
table['b'].unit = 'not-a-unit'
with catch_warnings() as w:
hdu = fits.table_to_hdu(table)
assert len(w) == 1
assert str(w[0].message).startswith("'not-a-unit' did not parse as"
" fits unit")
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2')
assert isinstance(hdu, fits.BinTableHDU)
filename = self.temp('test_table_to_hdu.fits')
hdu.writeto(filename, overwrite=True)
def test_table_to_hdu_convert_comment_convention(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table.meta['comments'] = ['This', 'is', 'a', 'comment']
hdu = fits.table_to_hdu(table)
assert hdu.header.get('comment') == ['This', 'is', 'a', 'comment']
with pytest.raises(ValueError):
hdu.header.index('comments')
def test_table_writeto_header(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5988
"""
data = np.zeros((5, ), dtype=[('x', float), ('y', int)])
h_in = fits.Header()
h_in['ANSWER'] = (42.0, 'LTU&E')
filename = self.temp('tabhdr42.fits')
fits.writeto(filename, data=data, header=h_in, overwrite=True)
h_out = fits.getheader(filename, ext=1)
assert h_out['ANSWER'] == 42
def test_image_extension_update_header(self):
"""
Test that _makehdu correctly includes the header. For example in the
fits.update convenience function.
"""
filename = self.temp('twoextension.fits')
hdus = [fits.PrimaryHDU(np.zeros((10, 10))),
fits.ImageHDU(np.zeros((10, 10)))]
fits.HDUList(hdus).writeto(filename)
fits.update(filename,
np.zeros((10, 10)),
header=fits.Header([('WHAT', 100)]),
ext=1)
h_out = fits.getheader(filename, ext=1)
assert h_out['WHAT'] == 100
def test_printdiff(self):
"""
Test that FITSDiff can run the different inputs without crashing.
"""
# Testing different string input options
assert printdiff(self.data('arange.fits'),
self.data('blank.fits')) is None
assert printdiff(self.data('arange.fits'),
self.data('blank.fits'), ext=0) is None
assert printdiff(self.data('o4sp040b0_raw.fits'),
self.data('o4sp040b0_raw.fits'),
extname='sci') is None
        # This may seem weird, but check printdiff's implementation to see
        # why we need to test an incorrect second file
with pytest.raises(OSError):
printdiff('o4sp040b0_raw.fits', 'fakefile.fits', extname='sci')
# Test HDU object inputs
with fits.open(self.data('stddata.fits'), mode='readonly') as in1:
with fits.open(self.data('checksum.fits'), mode='readonly') as in2:
assert printdiff(in1[0], in2[0]) is None
with pytest.raises(ValueError):
printdiff(in1[0], in2[0], ext=0)
assert printdiff(in1, in2) is None
with pytest.raises(NotImplementedError):
printdiff(in1, in2, 0)
def test_tabledump(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6937
"""
# copy fits file to the temp directory
filename = self.data('tb.fits')
temp_filename = self.temp('tb.fits')
shutil.copyfile(filename, temp_filename)
# test without datafile
fits.tabledump(temp_filename)
assert os.path.isfile(self.temp('tb_1.txt'))
# test with datafile
fits.tabledump(temp_filename, datafile=self.temp('test_tb.txt'))
assert os.path.isfile(self.temp('test_tb.txt'))
|
3e3004481aa4e722b6925bbee7d9b5ade058fd66f2ad00740bbd7ec12b58f421 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import os
from asdf.extension import AsdfExtension, BuiltinExtension
from asdf.resolver import Resolver, DEFAULT_URL_MAPPING
from asdf.util import filepath_to_url
# Make sure that all tag implementations are imported by the time we create
# the extension class so that _astropy_asdf_types is populated correctly. We
# could do this using __init__ files, except it causes pytest import errors in
# the case that asdf is not installed.
from .tags.coordinates.angle import *
from .tags.coordinates.representation import *
from .tags.coordinates.frames import *
from .tags.fits.fits import *
from .tags.table.table import *
from .tags.time.time import *
from .tags.transform.basic import *
from .tags.transform.compound import *
from .tags.transform.polynomial import *
from .tags.transform.projections import *
from .tags.transform.tabular import *
from .tags.unit.quantity import *
from .tags.unit.unit import *
from .types import _astropy_types, _astropy_asdf_types
__all__ = ['AstropyExtension', 'AstropyAsdfExtension']
ASTROPY_SCHEMA_URI_BASE = 'http://astropy.org/schemas/'
SCHEMA_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'schemas'))
ASTROPY_URL_MAPPING = [
(ASTROPY_SCHEMA_URI_BASE,
filepath_to_url(
os.path.join(SCHEMA_PATH, 'astropy.org')) +
'/{url_suffix}.yaml')]
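# Worked example (illustrative only): with the mapping above, a schema URI
# such as 'http://astropy.org/schemas/astropy/coordinates/frames/icrs-1.1.0'
# resolves to the local file
# <SCHEMA_PATH>/astropy.org/astropy/coordinates/frames/icrs-1.1.0.yaml
# (expressed as a file:// URL by filepath_to_url).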
# This extension is used to register custom types that have both tags and
# schemas defined by Astropy.
class AstropyExtension(AsdfExtension):
@property
def types(self):
return _astropy_types
@property
def tag_mapping(self):
return [('tag:astropy.org:astropy',
ASTROPY_SCHEMA_URI_BASE + 'astropy{tag_suffix}')]
@property
def url_mapping(self):
return ASTROPY_URL_MAPPING
# This extension is used to register custom tag types that have schemas defined
# by ASDF, but have tag implementations defined in astropy.
class AstropyAsdfExtension(BuiltinExtension):
@property
def types(self):
return _astropy_asdf_types
|
a6eeea445be13157eba2b170e2ca5ca414d8d27866c37f254c0fe793ba9dda08 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from numpy.testing import assert_array_equal
from asdf import yamlutil
from astropy import modeling
from .basic import TransformType
from . import _parameter_to_value
__all__ = ['AffineType', 'Rotate2DType', 'Rotate3DType']
class AffineType(TransformType):
name = "transform/affine"
version = '1.2.0'
types = ['astropy.modeling.projections.AffineTransformation2D']
@classmethod
def from_tree_transform(cls, node, ctx):
matrix = node['matrix']
translation = node['translation']
if matrix.shape != (2, 2):
raise NotImplementedError(
"asdf currently only supports 2x2 (2D) rotation transformation "
"matrices")
if translation.shape != (2,):
raise NotImplementedError(
"asdf currently only supports 2D translation transformations.")
return modeling.projections.AffineTransformation2D(
matrix=matrix, translation=translation)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'matrix': _parameter_to_value(model.matrix),
'translation': _parameter_to_value(model.translation)}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (a.__class__ == b.__class__)
assert_array_equal(a.matrix, b.matrix)
assert_array_equal(a.translation, b.translation)
class Rotate2DType(TransformType):
name = "transform/rotate2d"
version = '1.2.0'
types = ['astropy.modeling.rotations.Rotation2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return modeling.rotations.Rotation2D(node['angle'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'angle': _parameter_to_value(model.angle)}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.rotations.Rotation2D) and
isinstance(b, modeling.rotations.Rotation2D))
assert_array_equal(a.angle, b.angle)
class Rotate3DType(TransformType):
name = "transform/rotate3d"
version = '1.2.0'
types = ['astropy.modeling.rotations.RotateNative2Celestial',
'astropy.modeling.rotations.RotateCelestial2Native',
'astropy.modeling.rotations.EulerAngleRotation']
@classmethod
def from_tree_transform(cls, node, ctx):
if node['direction'] == 'native2celestial':
return modeling.rotations.RotateNative2Celestial(node["phi"],
node["theta"],
node["psi"])
elif node['direction'] == 'celestial2native':
return modeling.rotations.RotateCelestial2Native(node["phi"],
node["theta"],
node["psi"])
else:
return modeling.rotations.EulerAngleRotation(node["phi"],
node["theta"],
node["psi"],
axes_order=node["direction"])
@classmethod
def to_tree_transform(cls, model, ctx):
if isinstance(model, modeling.rotations.RotateNative2Celestial):
try:
node = {"phi": _parameter_to_value(model.lon),
"theta": _parameter_to_value(model.lat),
"psi": _parameter_to_value(model.lon_pole),
"direction": "native2celestial"
}
except AttributeError:
node = {"phi": model.lon,
"theta": model.lat,
"psi": model.lon_pole,
"direction": "native2celestial"
}
elif isinstance(model, modeling.rotations.RotateCelestial2Native):
try:
node = {"phi": _parameter_to_value(model.lon),
"theta": _parameter_to_value(model.lat),
"psi": _parameter_to_value(model.lon_pole),
"direction": "celestial2native"
}
except AttributeError:
node = {"phi": model.lon,
"theta": model.lat,
"psi": model.lon_pole,
"direction": "celestial2native"
}
else:
node = {"phi": _parameter_to_value(model.phi),
"theta": _parameter_to_value(model.theta),
"psi": _parameter_to_value(model.psi),
"direction": model.axes_order
}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert a.__class__ == b.__class__
if a.__class__.__name__ == "EulerAngleRotation":
assert_array_equal(a.phi, b.phi)
assert_array_equal(a.psi, b.psi)
assert_array_equal(a.theta, b.theta)
else:
assert_array_equal(a.lon, b.lon)
assert_array_equal(a.lat, b.lat)
assert_array_equal(a.lon_pole, b.lon_pole)
class GenericProjectionType(TransformType):
@classmethod
def from_tree_transform(cls, node, ctx):
args = []
for param_name, default in cls.params:
args.append(node.get(param_name, default))
if node['direction'] == 'pix2sky':
return cls.types[0](*args)
else:
return cls.types[1](*args)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {}
if isinstance(model, cls.types[0]):
node['direction'] = 'pix2sky'
else:
node['direction'] = 'sky2pix'
for param_name, default in cls.params:
val = getattr(model, param_name).value
if val != default:
node[param_name] = val
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert a.__class__ == b.__class__
_generic_projections = {
'zenithal_perspective': ('ZenithalPerspective', (('mu', 0.0), ('gamma', 0.0)), '1.2.0'),
'gnomonic': ('Gnomonic', (), None),
'stereographic': ('Stereographic', (), None),
'slant_orthographic': ('SlantOrthographic', (('xi', 0.0), ('eta', 0.0)), None),
'zenithal_equidistant': ('ZenithalEquidistant', (), None),
'zenithal_equal_area': ('ZenithalEqualArea', (), None),
'airy': ('Airy', (('theta_b', 90.0),), '1.2.0'),
'cylindrical_perspective': ('CylindricalPerspective', (('mu', 0.0), ('lam', 0.0)), '1.2.0'),
'cylindrical_equal_area': ('CylindricalEqualArea', (('lam', 0.0),), '1.2.0'),
'plate_carree': ('PlateCarree', (), None),
'mercator': ('Mercator', (), None),
'sanson_flamsteed': ('SansonFlamsteed', (), None),
'parabolic': ('Parabolic', (), None),
'molleweide': ('Molleweide', (), None),
'hammer_aitoff': ('HammerAitoff', (), None),
'conic_perspective': ('ConicPerspective', (('sigma', 0.0), ('delta', 0.0)), '1.2.0'),
'conic_equal_area': ('ConicEqualArea', (('sigma', 0.0), ('delta', 0.0)), '1.2.0'),
'conic_equidistant': ('ConicEquidistant', (('sigma', 0.0), ('delta', 0.0)), '1.2.0'),
'conic_orthomorphic': ('ConicOrthomorphic', (('sigma', 0.0), ('delta', 0.0)), '1.2.0'),
'bonne_equal_area': ('BonneEqualArea', (('theta1', 0.0),), '1.2.0'),
'polyconic': ('Polyconic', (), None),
'tangential_spherical_cube': ('TangentialSphericalCube', (), None),
'cobe_quad_spherical_cube': ('COBEQuadSphericalCube', (), None),
'quad_spherical_cube': ('QuadSphericalCube', (), None),
'healpix': ('HEALPix', (('H', 4.0), ('X', 3.0)), None),
'healpix_polar': ('HEALPixPolar', (), None)
}
def make_projection_types():
for tag_name, (name, params, version) in _generic_projections.items():
class_name = '{0}Type'.format(name)
types = ['astropy.modeling.projections.Pix2Sky_{0}'.format(name),
'astropy.modeling.projections.Sky2Pix_{0}'.format(name)]
members = {'name': 'transform/{0}'.format(tag_name),
'types': types,
'params': params}
if version:
members['version'] = version
globals()[class_name] = type(
str(class_name),
(GenericProjectionType,),
members)
__all__.append(class_name)
make_projection_types()
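# For illustration, the dynamic class creation above is equivalent to
# hand-writing one class per projection, e.g. (a sketch, not executed):
#
#     class GnomonicType(GenericProjectionType):
#         name = 'transform/gnomonic'
#         types = ['astropy.modeling.projections.Pix2Sky_Gnomonic',
#                  'astropy.modeling.projections.Sky2Pix_Gnomonic']
#         params = ()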
|
bf0cdf8db1ec0952c9305f9e0b01fd3a79dbfe611ca9dc1eb018699f96ecff63 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from asdf import yamlutil
import astropy.units as u
from astropy import modeling
from .basic import TransformType
from . import _parameter_to_value
__all__ = ['ShiftType', 'ScaleType', 'PolynomialType']
class ShiftType(TransformType):
name = "transform/shift"
version = '1.2.0'
types = ['astropy.modeling.models.Shift']
@classmethod
def from_tree_transform(cls, node, ctx):
offset = node['offset']
if not isinstance(offset, u.Quantity) and not np.isscalar(offset):
raise NotImplementedError(
"Asdf currently only supports scalar inputs to Shift transform.")
return modeling.models.Shift(offset)
@classmethod
def to_tree_transform(cls, model, ctx):
offset = model.offset
node = {'offset': _parameter_to_value(offset)}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Shift) and
isinstance(b, modeling.models.Shift))
assert_array_equal(a.offset.value, b.offset.value)
class ScaleType(TransformType):
name = "transform/scale"
version = '1.2.0'
types = ['astropy.modeling.models.Scale']
@classmethod
def from_tree_transform(cls, node, ctx):
factor = node['factor']
if not isinstance(factor, u.Quantity) and not np.isscalar(factor):
raise NotImplementedError(
"Asdf currently only supports scalar inputs to Scale transform.")
return modeling.models.Scale(factor)
@classmethod
def to_tree_transform(cls, model, ctx):
factor = model.factor
node = {'factor': _parameter_to_value(factor)}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Scale) and
isinstance(b, modeling.models.Scale))
assert_array_equal(a.factor, b.factor)
class PolynomialType(TransformType):
name = "transform/polynomial"
types = ['astropy.modeling.models.Polynomial1D',
'astropy.modeling.models.Polynomial2D']
@classmethod
def from_tree_transform(cls, node, ctx):
coefficients = np.asarray(node['coefficients'])
n_dim = coefficients.ndim
if n_dim == 1:
model = modeling.models.Polynomial1D(coefficients.size - 1)
model.parameters = coefficients
elif n_dim == 2:
shape = coefficients.shape
degree = shape[0] - 1
if shape[0] != shape[1]:
raise TypeError("Coefficients must be an (n+1, n+1) matrix")
coeffs = {}
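            # e.g. for degree 1 the (2, 2) matrix m yields c0_0 = m[0, 0],
            # c0_1 = m[0, 1] and c1_0 = m[1, 0]; m[1, 1] is skipped because
            # i + j would exceed the degree.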
for i in range(shape[0]):
for j in range(shape[0]):
if i + j < degree + 1:
                        name = 'c' + str(i) + '_' + str(j)
coeffs[name] = coefficients[i, j]
model = modeling.models.Polynomial2D(degree, **coeffs)
else:
raise NotImplementedError(
"Asdf currently only supports 1D or 2D polynomial transform.")
return model
@classmethod
def to_tree_transform(cls, model, ctx):
if isinstance(model, modeling.models.Polynomial1D):
coefficients = np.array(model.parameters)
elif isinstance(model, modeling.models.Polynomial2D):
degree = model.degree
coefficients = np.zeros((degree + 1, degree + 1))
for i in range(degree + 1):
for j in range(degree + 1):
if i + j < degree + 1:
                        name = 'c' + str(i) + '_' + str(j)
coefficients[i, j] = getattr(model, name).value
node = {'coefficients': coefficients}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, (modeling.models.Polynomial1D, modeling.models.Polynomial2D)) and
isinstance(b, (modeling.models.Polynomial1D, modeling.models.Polynomial2D)))
assert_array_equal(a.parameters, b.parameters)
|
fbaca7aac2d19ccf33fd37a20ccb51d07e0fcb2cd2def39d749e4ec582e370d3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from asdf import yamlutil
from astropy import modeling
from .basic import TransformType
__all__ = ['TabularType']
class TabularType(TransformType):
name = "transform/tabular"
version = '1.2.0'
types = [
modeling.models.Tabular2D, modeling.models.Tabular1D
]
@classmethod
def from_tree_transform(cls, node, ctx):
lookup_table = node.pop("lookup_table")
dim = lookup_table.ndim
name = node.get('name', None)
fill_value = node.pop("fill_value", None)
if dim == 1:
# The copy is necessary because the array is memory mapped.
points = (node['points'][0][:],)
model = modeling.models.Tabular1D(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value, name=name)
elif dim == 2:
points = tuple([p[:] for p in node['points']])
model = modeling.models.Tabular2D(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value, name=name)
else:
tabular_class = modeling.models.tabular_model(dim, name)
points = tuple([p[:] for p in node['points']])
model = tabular_class(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value, name=name)
return model
@classmethod
def to_tree_transform(cls, model, ctx):
node = {}
node["fill_value"] = model.fill_value
node["lookup_table"] = model.lookup_table
node["points"] = [p for p in model.points]
node["method"] = str(model.method)
node["bounds_error"] = model.bounds_error
node["name"] = model.name
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
assert_array_equal(a.lookup_table, b.lookup_table)
assert_array_equal(a.points, b.points)
assert (a.method == b.method)
if a.fill_value is None:
assert b.fill_value is None
elif np.isnan(a.fill_value):
assert np.isnan(b.fill_value)
else:
assert(a.fill_value == b.fill_value)
assert(a.bounds_error == b.bounds_error)
|
257acc3dbab99e1cb48a0ffd7c2936f54baa2d9fc88637a52d23497eb664a398 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import os
import glob
from asdf import tagged
from asdf.yamlutil import custom_tree_to_tagged_tree
import astropy.coordinates
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import Quantity
from astropy.coordinates import ICRS, Longitude, Latitude, Angle
from ..unit.quantity import QuantityType
from ...types import AstropyType
__all__ = ['CoordType']
SCHEMA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..',
'schemas',
'astropy.org',
'astropy'))
def _get_frames():
"""
By reading the schema files, get the list of all the frames we can
save/load.
"""
search = os.path.join(SCHEMA_PATH, 'coordinates', 'frames', '*.yaml')
files = glob.glob(search)
names = []
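    # Each schema file is named '<frame>-<version>.yaml' (e.g.
    # 'fk5-1.0.0.yaml'), so splitting the file name on '-' recovers the
    # frame name.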
for fpath in files:
path, fname = os.path.split(fpath)
frame, _ = fname.split('-')
# Skip baseframe because we cannot directly save / load it.
# Skip icrs because we have an explicit tag for it because there are
# two versions.
if frame not in ['baseframe', 'icrs']:
names.append(frame)
return names
class BaseCoordType:
"""
This defines the base methods for coordinates, without defining anything
related to asdf types. This allows subclasses with different types and
schemas to use this without confusing the metaclass machinery.
"""
@staticmethod
def _tag_to_frame(tag):
"""
Extract the frame name from the tag.
"""
tag = tag[tag.rfind('/')+1:]
tag = tag[:tag.rfind('-')]
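        # e.g. 'tag:astropy.org:astropy/coordinates/frames/fk5-1.0.0'
        # reduces to 'fk5', which the transform graph can look up.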
return frame_transform_graph.lookup_name(tag)
@classmethod
def _frame_name_to_tag(cls, frame_name):
return cls.make_yaml_tag(cls._tag_prefix + frame_name)
@classmethod
def from_tree_tagged(cls, node, ctx):
frame = cls._tag_to_frame(node._tag)
data = node.get('data', None)
if data is not None:
            return frame(data, **node['frame_attributes'])
return frame(**node['frame_attributes'])
@classmethod
def to_tree_tagged(cls, frame, ctx):
if type(frame) not in frame_transform_graph.frame_set:
raise ValueError("Can only save frames that are registered with the "
"transformation graph.")
node = {}
if frame.has_data:
node['data'] = custom_tree_to_tagged_tree(frame.data, ctx)
frame_attributes = {}
for attr in frame.frame_attributes.keys():
value = getattr(frame, attr, None)
if value is not None:
frame_attributes[attr] = value
node['frame_attributes'] = custom_tree_to_tagged_tree(frame_attributes, ctx)
return tagged.tag_object(cls._frame_name_to_tag(frame.name), node, ctx=ctx)
@classmethod
def assert_equal(cls, old, new):
assert isinstance(new, type(old))
if new.has_data:
assert_quantity_allclose(new.data.lon, old.data.lon)
assert_quantity_allclose(new.data.lat, old.data.lat)
class CoordType(BaseCoordType, AstropyType):
_tag_prefix = "coordinates/frames/"
name = ["coordinates/frames/" + f for f in _get_frames()]
types = [astropy.coordinates.BaseCoordinateFrame]
handle_dynamic_subclasses = True
requires = ['astropy']
version = "1.0.0"
class ICRSType(CoordType):
"""
Define a special tag for ICRS so we can make it version 1.1.0.
"""
name = "coordinates/frames/icrs"
types = ['astropy.coordinates.ICRS']
version = "1.1.0"
class ICRSType10(AstropyType):
name = "coordinates/frames/icrs"
types = [astropy.coordinates.ICRS]
requires = ['astropy']
version = "1.0.0"
@classmethod
def from_tree(cls, node, ctx):
        wrap_angle = Angle(QuantityType.from_tree(node['ra']['wrap_angle'], ctx))
ra = Longitude(
node['ra']['value'],
unit=node['ra']['unit'],
wrap_angle=wrap_angle)
dec = Latitude(node['dec']['value'], unit=node['dec']['unit'])
return ICRS(ra=ra, dec=dec)
@classmethod
def to_tree(cls, frame, ctx):
node = {}
wrap_angle = Quantity(frame.ra.wrap_angle)
node['ra'] = {
'value': frame.ra.value,
'unit': frame.ra.unit.to_string(),
'wrap_angle': custom_tree_to_tagged_tree(wrap_angle, ctx)
}
node['dec'] = {
'value': frame.dec.value,
'unit': frame.dec.unit.to_string()
}
return node
@classmethod
def assert_equal(cls, old, new):
assert isinstance(old, ICRS)
assert isinstance(new, ICRS)
assert_quantity_allclose(new.ra, old.ra)
assert_quantity_allclose(new.dec, old.dec)
|
a78711cd04696a361f935c58ff467f83388b5cb734b6c2431b340ba33bbe3573 | from asdf.yamlutil import custom_tree_to_tagged_tree
import astropy.coordinates.representation
from astropy.coordinates.representation import BaseRepresentationOrDifferential
from astropy.tests.helper import assert_quantity_allclose
from ...types import AstropyType
class RepresentationType(AstropyType):
name = "coordinates/representation"
types = [BaseRepresentationOrDifferential]
version = "1.0.0"
_representation_module = astropy.coordinates.representation
@classmethod
def to_tree(cls, representation, ctx):
comps = representation.components
components = {}
for c in comps:
value = getattr(representation, '_' + c, None)
if value is not None:
components[c] = value
t = type(representation)
node = {}
node['type'] = t.__name__
node['components'] = custom_tree_to_tagged_tree(components, ctx)
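        # e.g. a CartesianRepresentation serializes as
        # {'type': 'CartesianRepresentation',
        #  'components': {'x': <Quantity>, 'y': <Quantity>, 'z': <Quantity>}}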
return node
@classmethod
def from_tree(cls, node, ctx):
rep_type = getattr(cls._representation_module, node['type'])
return rep_type(**node['components'])
@classmethod
def assert_equal(cls, old, new):
assert isinstance(new, type(old))
assert new.components == old.components
for comp in new.components:
nc = getattr(new, comp)
oc = getattr(old, comp)
assert_quantity_allclose(nc, oc)
|
51e80771f186ba9d803e9e9242b1c15d8653b1f33c1c9125c607ee91ee4b62ba | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf.yamlutil import custom_tree_to_tagged_tree
from astropy.coordinates import Angle, Latitude, Longitude
from ..unit.quantity import QuantityType
__all__ = ['AngleType', 'LatitudeType', 'LongitudeType']
class AngleType(QuantityType):
name = "coordinates/angle"
types = [Angle]
requires = ['astropy']
version = "1.0.0"
organization = 'astropy.org'
standard = 'astropy'
@classmethod
def from_tree(cls, node, ctx):
return Angle(super().from_tree(node, ctx))
class LatitudeType(AngleType):
name = "coordinates/latitude"
types = [Latitude]
@classmethod
def from_tree(cls, node, ctx):
return Latitude(super().from_tree(node, ctx))
class LongitudeType(AngleType):
name = "coordinates/longitude"
types = [Longitude]
@classmethod
def from_tree(cls, node, ctx):
wrap_angle = node['wrap_angle']
return Longitude(super().from_tree(node, ctx), wrap_angle=wrap_angle)
@classmethod
def to_tree(cls, longitude, ctx):
tree = super().to_tree(longitude, ctx)
tree['wrap_angle'] = custom_tree_to_tagged_tree(longitude.wrap_angle, ctx)
return tree
|
7ff638dc07a718d7bded1fc98625a6fc1118527f17af9f98b054b9aeb15e1682 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from astropy import table
asdf = pytest.importorskip('asdf', minversion='2.0.0')
from asdf.tests import helpers
def test_table(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'))
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 3
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_array_columns(tmpdir):
a = np.array([([[1, 2], [3, 4]], 2.0, 'x'),
([[5, 6], [7, 8]], 5.0, 'y'),
([[9, 10], [11, 12]], 8.2, 'z')],
dtype=[(str('a'), str('<i4'), (2, 2)),
(str('b'), str('<f8')),
(str('c'), str('|S1'))])
t = table.Table(a, copy=False)
assert t.columns['a'].shape == (3, 2, 2)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_structured_array_columns(tmpdir):
a = np.array([((1, 'a'), 2.0, 'x'),
((4, 'b'), 5.0, 'y'),
((5, 'c'), 8.2, 'z')],
dtype=[(str('a'), [(str('a0'), str('<i4')),
(str('a1'), str('|S1'))]),
(str('b'), str('<f8')),
(str('c'), str('|S1'))])
t = table.Table(a, copy=False)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_table_row_order(tmpdir):
a = np.array([(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')],
dtype=[(str('a'), str('<i4')),
(str('b'), str('<f8')),
(str('c'), str('|S1'))])
t = table.Table(a, copy=False)
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_table_inline(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'))
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
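    # asdf's auto_inline option (used in write_options below) stores arrays
    # with fewer elements than the threshold inline in the YAML tree rather
    # than as binary blocks, hence no internal blocks are expected.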
def check(ff):
assert len(list(ff.blocks.internal_blocks)) == 0
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check,
write_options={'auto_inline': 64})
def test_mismatched_columns():
yaml = """
table: !core/table
columns:
- !core/column
data: !core/ndarray
data: [0, 1, 2]
name: a
- !core/column
data: !core/ndarray
data: [0, 1, 2, 3]
name: b
"""
buff = helpers.yaml_to_asdf(yaml)
with pytest.raises(ValueError):
with asdf.AsdfFile.open(buff) as ff:
pass
def test_masked_table(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'), masked=True)
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['a'].mask = [True, False, True]
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 4
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
|
324e68daf3a16e3572e8eb6051528439ffb657d44444e3e3ff11b119c63357d9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import os
import pytest
import numpy as np
from astropy.io import fits
asdf = pytest.importorskip('asdf', minversion='2.0.0')
from asdf.tests import helpers
def test_complex_structure(tmpdir):
with fits.open(os.path.join(
os.path.dirname(__file__), 'data', 'complex.fits'), memmap=False) as hdulist:
tree = {
'fits': hdulist
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fits_table(tmpdir):
a = np.array(
[(0, 1), (2, 3)],
dtype=[(str('A'), int), (str('B'), int)])
h = fits.HDUList()
h.append(fits.BinTableHDU.from_columns(a))
tree = {'fits': h}
def check_yaml(content):
assert b'!core/table' in content
helpers.assert_roundtrip_tree(tree, tmpdir, raw_yaml_check_func=check_yaml)
|
995f20fac25f6bd8a993012981ca3a341478ee9158776b7f667272c5d189fd1e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
import numpy as np
asdf = pytest.importorskip('asdf', minversion='2.0.0')
from asdf import util
from asdf.tests import helpers
import astropy.units as u
from astropy.modeling import models as astmodels
test_models = [
astmodels.Identity(2), astmodels.Polynomial1D(2, c0=1, c1=2, c2=3),
astmodels.Polynomial2D(1, c0_0=1, c0_1=2, c1_0=3), astmodels.Shift(2.),
astmodels.Scale(3.4), astmodels.RotateNative2Celestial(5.63, -72.5, 180),
astmodels.RotateCelestial2Native(5.63, -72.5, 180),
astmodels.EulerAngleRotation(23, 14, 2.3, axes_order='xzx'),
astmodels.Mapping((0, 1), n_inputs=3),
astmodels.Shift(2.*u.deg),
astmodels.Scale(3.4*u.deg),
astmodels.RotateNative2Celestial(5.63*u.deg, -72.5*u.deg, 180*u.deg),
astmodels.RotateCelestial2Native(5.63*u.deg, -72.5*u.deg, 180*u.deg),
]
def test_transforms_compound(tmpdir):
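    # In astropy.modeling, '&' joins models in parallel (their inputs are
    # concatenated), '|' chains them serially (one model's output feeds the
    # next), and '+' adds the outputs of two models.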
tree = {
'compound':
astmodels.Shift(1) & astmodels.Shift(2) |
astmodels.Sky2Pix_TAN() |
astmodels.Rotation2D() |
astmodels.AffineTransformation2D([[2, 0], [0, 2]], [42, 32]) +
astmodels.Rotation2D(32)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_inverse_transforms(tmpdir):
rotation = astmodels.Rotation2D(32)
rotation.inverse = astmodels.Rotation2D(45)
real_rotation = astmodels.Rotation2D(32)
tree = {
'rotation': rotation,
'real_rotation': real_rotation
}
def check(ff):
assert ff.tree['rotation'].inverse.angle == 45
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
@pytest.mark.parametrize(('model'), test_models)
def test_single_model(tmpdir, model):
tree = {'single_model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_name(tmpdir):
def check(ff):
assert ff.tree['rot'].name == 'foo'
tree = {'rot': astmodels.Rotation2D(23, name='foo')}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_zenithal_with_arguments(tmpdir):
tree = {
'azp': astmodels.Sky2Pix_AZP(0.5, 0.3)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_naming_of_compound_model(tmpdir):
"""Issue #87"""
def asdf_check(ff):
assert ff.tree['model'].name == 'compound_model'
offx = astmodels.Shift(1)
scl = astmodels.Scale(2)
model = (offx | scl).rename('compound_model')
tree = {
'model': model
}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=asdf_check)
def test_generic_projections(tmpdir):
from astropy.io.misc.asdf.tags.transform import projections
for tag_name, (name, params, version) in projections._generic_projections.items():
tree = {
'forward': util.resolve_name(
'astropy.modeling.projections.Sky2Pix_{0}'.format(name))(),
'backward': util.resolve_name(
'astropy.modeling.projections.Pix2Sky_{0}'.format(name))()
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model(tmpdir):
points = np.arange(0, 5)
values = [1., 10, 2, 45, -3]
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[ 3., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 0.]])
points = ([1, 2, 3], [1, 2, 3])
model2 = astmodels.Tabular2D(points, lookup_table=table, bounds_error=False,
fill_value=None, method='nearest')
tree = {'model': model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_bounding_box(tmpdir):
model = astmodels.Shift(1) & astmodels.Shift(2)
model.bounding_box = ((1, 3), (2, 4))
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
|
73f6528413c99a178cd5fff3ace0f0e8180b9ce29798554c67c916c07f80fd37 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import datetime
from collections import OrderedDict
import pytest
import numpy as np
from astropy import time
asdf = pytest.importorskip('asdf', minversion='2.0.0')
from asdf import AsdfFile, yamlutil, tagged
from asdf.tests import helpers
import asdf.schema as asdf_schema
def _flatten_combiners(schema):
newschema = OrderedDict()
def add_entry(path, schema, combiner):
# TODO: Simplify?
cursor = newschema
for i in range(len(path)):
part = path[i]
if isinstance(part, int):
cursor = cursor.setdefault('items', [])
while len(cursor) <= part:
cursor.append({})
cursor = cursor[part]
elif part == 'items':
cursor = cursor.setdefault('items', OrderedDict())
else:
cursor = cursor.setdefault('properties', OrderedDict())
if i < len(path) - 1 and isinstance(path[i+1], int):
cursor = cursor.setdefault(part, [])
else:
cursor = cursor.setdefault(part, OrderedDict())
        cursor.update(schema)

    # The callback that walked the schema tree and populated ``newschema``
    # appears to have been truncated here; return what was built so that
    # callers receive a schema dict rather than ``None``.
    return newschema
def test_time(tmpdir):
time_array = time.Time(
np.arange(100), format="unix")
tree = {
'large_time_array': time_array
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location(tmpdir):
# See https://github.com/spacetelescope/asdf/issues/341
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
location = EarthLocation(x=[1,2]*u.m, y=[3,4]*u.m, z=[5,6]*u.m)
t = time.Time([1,2], location=location, format='cxcsec')
tree = {'time': t}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_isot(tmpdir):
tree = {
'time': time.Time('2000-01-01T00:00:00.000')
}
helpers.assert_roundtrip_tree(tree, tmpdir)
ff = asdf.AsdfFile(tree)
tree = yamlutil.custom_tree_to_tagged_tree(ff.tree, ff)
assert isinstance(tree['time'], str)
def test_time_tag():
schema = asdf_schema.load_schema(
'http://stsci.edu/schemas/asdf/time/time-1.1.0',
resolve_references=True)
schema = _flatten_combiners(schema)
date = time.Time(datetime.datetime.now())
tree = {'date': date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree['date'], asdf)
asdf_schema.validate(instance, schema=schema)
tag = 'tag:stsci.edu:asdf/time/time-1.1.0'
date = tagged.tag_object(tag, date)
tree = {'date': date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree['date'], asdf)
asdf_schema.validate(instance, schema=schema)
|
645a7cb3b8c9e24557af9877654e1cfae70dab8ad65666708d1c1adac263a4f4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf', minversion='2.0.0')
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units
from astropy.coordinates import ICRS, FK5, Longitude, Latitude, Angle
from ....extension import AstropyExtension
def test_hcrs_basic(tmpdir):
ra = Longitude(25, unit=units.deg)
dec = Latitude(45, unit=units.deg)
tree = {'coord': ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir, extensions=AstropyExtension())
def test_icrs_basic(tmpdir):
wrap_angle = Angle(1.5, unit=units.rad)
ra = Longitude(25, unit=units.deg, wrap_angle=wrap_angle)
dec = Latitude(45, unit=units.deg)
tree = {'coord': ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir, extensions=AstropyExtension())
def test_icrs_nodata(tmpdir):
tree = {'coord': ICRS()}
assert_roundtrip_tree(tree, tmpdir, extensions=AstropyExtension())
def test_icrs_compound(tmpdir):
icrs = ICRS(ra=[0, 1, 2]*units.deg, dec=[3, 4, 5]*units.deg)
tree = {'coord': icrs}
assert_roundtrip_tree(tree, tmpdir, extensions=AstropyExtension())
def test_fk5_time(tmpdir):
tree = {'coord': FK5(equinox="2011-01-01T00:00:00")}
assert_roundtrip_tree(tree, tmpdir, extensions=AstropyExtension())
|
a4092407df96a3757896d58c35c1e8509b41a3f3e2510c08e13869451baf4760 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
import astropy.units as u
from asdf.tests.helpers import assert_roundtrip_tree
from astropy.coordinates import Longitude, Latitude, Angle
from ....extension import AstropyExtension
def test_angle(tmpdir):
tree = {'angle': Angle(100, u.deg)}
assert_roundtrip_tree(tree, tmpdir)
def test_latitude(tmpdir):
tree = {'angle': Latitude(10, u.deg)}
assert_roundtrip_tree(tree, tmpdir)
def test_longitude(tmpdir):
tree = {'angle': Longitude(-100, u.deg, wrap_angle=180*u.deg)}
assert_roundtrip_tree(tree, tmpdir)
|
64a0d6f2dbe75ecdc34f41c2102a909035baa7f5e57b06de9a75a91d99c73db7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
from numpy.random import random, randint
import astropy.units as u
from astropy.coordinates import Angle
import astropy.coordinates.representation as r
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree
@pytest.fixture(params=filter(lambda x: "Base" not in x, r.__all__))
def representation(request):
rep = getattr(r, request.param)
angle_unit = u.deg
other_unit = u.km
kwargs = {}
arr_len = randint(1, 100)
for aname, atype in rep.attr_classes.items():
if issubclass(atype, Angle):
value = ([random()] * arr_len) * angle_unit
else:
value = ([random()] * arr_len) * other_unit
kwargs[aname] = value
return rep(**kwargs)
def test_representations(tmpdir, representation):
tree = {'representation': representation}
assert_roundtrip_tree(tree, tmpdir)
|
6af5ecb56b9997a8717dc4b7a37c67ddaf8b986e2582adb0baa1dcd70c2b654d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import io
import pytest
from astropy import units as u
asdf = pytest.importorskip('asdf', minversion='2.0.0')
from asdf.tests import helpers
# TODO: Implement defunit
def test_unit():
yaml = """
unit: !unit/unit-1.0.0 "2.1798721 10-18kg m2 s-2"
"""
buff = helpers.yaml_to_asdf(yaml)
with asdf.AsdfFile.open(buff) as ff:
assert ff.tree['unit'].is_equivalent(u.Ry)
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.AsdfFile.open(buff2) as ff:
assert ff.tree['unit'].is_equivalent(u.Ry)
|
c2dfa457cd203a5c2c4b1b686377d73790396d00acf9eea435428e178d03ba92 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import io
import pytest
from astropy import units
asdf = pytest.importorskip('asdf', minversion='2.0.0')
from asdf.tests import helpers
def roundtrip_quantity(yaml, quantity):
buff = helpers.yaml_to_asdf(yaml)
with asdf.AsdfFile.open(buff) as ff:
assert (ff.tree['quantity'] == quantity).all()
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.AsdfFile.open(buff2) as ff:
assert (ff.tree['quantity'] == quantity).all()
def test_value_scalar(tmpdir):
testval = 2.71828
testunit = units.kpc
yaml = """
quantity: !unit/quantity-1.1.0
value: {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_array(tmpdir):
testval = [3.14159]
testunit = units.kg
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_multiarray(tmpdir):
testval = [x*2.3081 for x in range(10)]
testunit = units.ampere
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_ndarray(tmpdir):
from numpy import array, float64
testval = [[1,2,3],[4,5,6]]
testunit = units.km
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0
datatype: float64
data:
{}
unit: {}
""".format(testval, testunit)
data = array(testval, float64)
quantity = units.Quantity(data, unit=testunit)
roundtrip_quantity(yaml, quantity)
|
1db139a9dc43c4d35d2e11a64603e4654ac7e4772a2fa9f4594d9f96f7dc8f37 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a set of regression tests for vo.
"""
# STDLIB
import difflib
import io
import pathlib
import sys
import gzip
from unittest import mock
# THIRD-PARTY
import pytest
import numpy as np
from numpy.testing import assert_array_equal
# LOCAL
from ..table import parse, parse_single_table, validate
from .. import tree
from ..exceptions import VOTableSpecError, VOWarning
from ..xmlutil import validate_schema
from ....utils.data import get_pkg_data_filename, get_pkg_data_filenames
from ....tests.helper import raises, catch_warnings
# Determine the kind of float formatting in this build of Python
if hasattr(sys, 'float_repr_style'):
legacy_float_repr = (sys.float_repr_style == 'legacy')
else:
legacy_float_repr = sys.platform.startswith('win')
def assert_validate_schema(filename, version):
if sys.platform.startswith('win'):
return
try:
rc, stdout, stderr = validate_schema(filename, version)
except OSError:
# If xmllint is not installed, we want the test to pass anyway
return
assert rc == 0, 'File did not validate against VOTable schema'
def test_parse_single_table():
table = parse_single_table(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
assert isinstance(table, tree.Table)
assert len(table.array) == 5
def test_parse_single_table2():
table2 = parse_single_table(
get_pkg_data_filename('data/regression.xml'),
table_number=1,
pedantic=False)
assert isinstance(table2, tree.Table)
assert len(table2.array) == 1
assert len(table2.array.dtype.names) == 28
@raises(IndexError)
def test_parse_single_table3():
parse_single_table(
get_pkg_data_filename('data/regression.xml'),
table_number=3, pedantic=False)
def _test_regression(tmpdir, _python_based=False, binary_mode=1):
# Read the VOTABLE
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False,
_debug_python_based_parser=_python_based)
table = votable.get_first_table()
dtypes = [
((str('string test'), str('string_test')), str('|O8')),
((str('fixed string test'), str('string_test_2')), str('|S10')),
(str('unicode_test'), str('|O8')),
((str('unicode test'), str('fixed_unicode_test')), str('<U10')),
((str('string array test'), str('string_array_test')), str('|S4')),
(str('unsignedByte'), str('|u1')),
(str('short'), str('<i2')),
(str('int'), str('<i4')),
(str('long'), str('<i8')),
(str('double'), str('<f8')),
(str('float'), str('<f4')),
(str('array'), str('|O8')),
(str('bit'), str('|b1')),
(str('bitarray'), str('|b1'), (3, 2)),
(str('bitvararray'), str('|O8')),
(str('bitvararray2'), str('|O8')),
(str('floatComplex'), str('<c8')),
(str('doubleComplex'), str('<c16')),
(str('doubleComplexArray'), str('|O8')),
(str('doubleComplexArrayFixed'), str('<c16'), (2,)),
(str('boolean'), str('|b1')),
(str('booleanArray'), str('|b1'), (4,)),
(str('nulls'), str('<i4')),
(str('nulls_array'), str('<i4'), (2, 2)),
(str('precision1'), str('<f8')),
(str('precision2'), str('<f8')),
(str('doublearray'), str('|O8')),
(str('bitarray2'), str('|b1'), (16,))
]
if sys.byteorder == 'big':
new_dtypes = []
for dtype in dtypes:
dtype = list(dtype)
dtype[1] = dtype[1].replace(str('<'), str('>'))
new_dtypes.append(tuple(dtype))
dtypes = new_dtypes
assert table.array.dtype == dtypes
votable.to_xml(str(tmpdir.join("regression.tabledata.xml")),
_debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.tabledata.xml")),
votable.version)
if binary_mode == 1:
votable.get_first_table().format = 'binary'
votable.version = '1.1'
elif binary_mode == 2:
votable.get_first_table()._config['version_1_3_or_later'] = True
votable.get_first_table().format = 'binary2'
votable.version = '1.3'
# Also try passing a file handle
with open(str(tmpdir.join("regression.binary.xml")), "wb") as fd:
votable.to_xml(fd, _debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.binary.xml")),
votable.version)
# Also try passing a file handle
with open(str(tmpdir.join("regression.binary.xml")), "rb") as fd:
votable2 = parse(fd, pedantic=False,
_debug_python_based_parser=_python_based)
votable2.get_first_table().format = 'tabledata'
votable2.to_xml(str(tmpdir.join("regression.bin.tabledata.xml")),
_astropy_version="testing",
_debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.bin.tabledata.xml")),
votable.version)
with open(
get_pkg_data_filename(
'data/regression.bin.tabledata.truth.{0}.xml'.format(
votable.version)),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
with open(str(tmpdir.join("regression.bin.tabledata.xml")),
'rt', encoding='utf-8') as fd:
output = fd.readlines()
# If the lines happen to be different, print a diff
# This is convenient for debugging
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
# Test implicit gzip saving
votable2.to_xml(
str(tmpdir.join("regression.bin.tabledata.xml.gz")),
_astropy_version="testing",
_debug_python_based_parser=_python_based)
with gzip.GzipFile(
str(tmpdir.join("regression.bin.tabledata.xml.gz")), 'rb') as gzfd:
output = gzfd.readlines()
output = [x.decode('utf-8').rstrip() for x in output]
truth = [x.rstrip() for x in truth]
assert truth == output
@pytest.mark.xfail(str('legacy_float_repr'))
def test_regression(tmpdir):
_test_regression(tmpdir, False)
@pytest.mark.xfail(str('legacy_float_repr'))
def test_regression_python_based_parser(tmpdir):
_test_regression(tmpdir, True)
@pytest.mark.xfail(str('legacy_float_repr'))
def test_regression_binary2(tmpdir):
_test_regression(tmpdir, False, 2)
class TestFixups:
def setup_class(self):
self.table = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False).get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_implicit_id(self):
assert_array_equal(self.array['string_test_2'],
self.array['fixed string test'])
class TestReferences:
def setup_class(self):
self.votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_fieldref(self):
fieldref = self.table.groups[1].entries[0]
assert isinstance(fieldref, tree.FieldRef)
assert fieldref.get_ref().name == 'boolean'
assert fieldref.get_ref().datatype == 'boolean'
def test_paramref(self):
paramref = self.table.groups[0].entries[0]
assert isinstance(paramref, tree.ParamRef)
assert paramref.get_ref().name == 'INPUT'
assert paramref.get_ref().datatype == 'float'
def test_iter_fields_and_params_on_a_group(self):
assert len(list(self.table.groups[1].iter_fields_and_params())) == 2
def test_iter_groups_on_a_group(self):
assert len(list(self.table.groups[1].iter_groups())) == 1
def test_iter_groups(self):
# Because of the ref'd table, there are more logical groups
# than actually exist in the file
assert len(list(self.votable.iter_groups())) == 9
def test_ref_table(self):
tables = list(self.votable.iter_tables())
for x, y in zip(tables[0].array.data[0], tables[1].array.data[0]):
assert_array_equal(x, y)
def test_iter_coosys(self):
assert len(list(self.votable.iter_coosys())) == 1
def test_select_columns_by_index():
columns = [0, 5, 13]
table = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False, columns=columns).get_first_table()
array = table.array
mask = table.array.mask
assert array['string_test'][0] == b"String & test"
columns = ['string_test', 'unsignedByte', 'bitarray']
for c in columns:
assert not np.all(mask[c])
assert np.all(mask['unicode_test'])
def test_select_columns_by_name():
columns = ['string_test', 'unsignedByte', 'bitarray']
table = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False, columns=columns).get_first_table()
array = table.array
mask = table.array.mask
assert array['string_test'][0] == b"String & test"
for c in columns:
assert not np.all(mask[c])
assert np.all(mask['unicode_test'])
class TestParse:
def setup_class(self):
self.votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_string_test(self):
assert issubclass(self.array['string_test'].dtype.type,
np.object_)
assert_array_equal(
self.array['string_test'],
[b'String & test', b'String & test', b'XXXX',
b'', b''])
def test_fixed_string_test(self):
assert issubclass(self.array['string_test_2'].dtype.type,
np.string_)
assert_array_equal(
self.array['string_test_2'],
[b'Fixed stri', b'0123456789', b'XXXX', b'', b''])
def test_unicode_test(self):
assert issubclass(self.array['unicode_test'].dtype.type,
np.object_)
assert_array_equal(self.array['unicode_test'],
["Ceçi n'est pas un pipe",
'வணக்கம்',
'XXXX', '', ''])
def test_fixed_unicode_test(self):
assert issubclass(self.array['fixed_unicode_test'].dtype.type,
np.unicode_)
assert_array_equal(self.array['fixed_unicode_test'],
["Ceçi n'est",
'வணக்கம்',
'0123456789', '', ''])
def test_unsignedByte(self):
assert issubclass(self.array['unsignedByte'].dtype.type,
np.uint8)
assert_array_equal(self.array['unsignedByte'],
[128, 255, 0, 255, 255])
assert not np.any(self.mask['unsignedByte'])
def test_short(self):
assert issubclass(self.array['short'].dtype.type,
np.int16)
assert_array_equal(self.array['short'],
[4096, 32767, -4096, 32767, 32767])
assert not np.any(self.mask['short'])
def test_int(self):
assert issubclass(self.array['int'].dtype.type,
np.int32)
assert_array_equal(
self.array['int'],
[268435456, 2147483647, -268435456, 268435455, 123456789])
assert_array_equal(self.mask['int'],
[False, False, False, False, True])
def test_long(self):
assert issubclass(self.array['long'].dtype.type,
np.int64)
assert_array_equal(
self.array['long'],
[922337203685477, 123456789, -1152921504606846976,
1152921504606846975, 123456789])
assert_array_equal(self.mask['long'],
[False, True, False, False, True])
def test_double(self):
assert issubclass(self.array['double'].dtype.type,
np.float64)
assert_array_equal(self.array['double'],
[8.9990234375, 0.0, np.inf, np.nan, -np.inf])
assert_array_equal(self.mask['double'],
[False, False, False, True, False])
def test_float(self):
assert issubclass(self.array['float'].dtype.type,
np.float32)
assert_array_equal(self.array['float'],
[1.0, 0.0, np.inf, np.inf, np.nan])
assert_array_equal(self.mask['float'],
[False, False, False, False, True])
def test_array(self):
assert issubclass(self.array['array'].dtype.type,
np.object_)
match = [[],
[[42, 32], [12, 32]],
[[12, 34], [56, 78], [87, 65], [43, 21]],
[[-1, 23]],
[[31, -1]]]
for a, b in zip(self.array['array'], match):
# assert issubclass(a.dtype.type, np.int64)
# assert a.shape[1] == 2
for a0, b0 in zip(a, b):
assert issubclass(a0.dtype.type, np.int64)
assert_array_equal(a0, b0)
assert self.array.data['array'][3].mask[0][0]
assert self.array.data['array'][4].mask[0][1]
def test_bit(self):
assert issubclass(self.array['bit'].dtype.type,
np.bool_)
assert_array_equal(self.array['bit'],
[True, False, True, False, False])
def test_bit_mask(self):
assert_array_equal(self.mask['bit'],
[False, False, False, False, True])
def test_bitarray(self):
assert issubclass(self.array['bitarray'].dtype.type,
np.bool_)
assert self.array['bitarray'].shape == (5, 3, 2)
assert_array_equal(self.array['bitarray'],
[[[True, False],
[True, True],
[False, True]],
[[False, True],
[False, False],
[True, True]],
[[True, True],
[True, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]]])
def test_bitarray_mask(self):
assert_array_equal(self.mask['bitarray'],
[[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[True, True],
[True, True],
[True, True]],
[[True, True],
[True, True],
[True, True]]])
def test_bitvararray(self):
assert issubclass(self.array['bitvararray'].dtype.type,
np.object_)
match = [[True, True, True],
[False, False, False, False, False],
[True, False, True, False, True],
[], []]
for a, b in zip(self.array['bitvararray'], match):
assert_array_equal(a, b)
match_mask = [[False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
False, False]
for a, b in zip(self.array['bitvararray'], match_mask):
assert_array_equal(a.mask, b)
def test_bitvararray2(self):
assert issubclass(self.array['bitvararray2'].dtype.type,
np.object_)
match = [[],
[[[False, True],
[False, False],
[True, False]],
[[True, False],
[True, False],
[True, False]]],
[[[True, True],
[True, True],
[True, True]]],
[],
[]]
for a, b in zip(self.array['bitvararray2'], match):
for a0, b0 in zip(a, b):
assert a0.shape == (3, 2)
assert issubclass(a0.dtype.type, np.bool_)
assert_array_equal(a0, b0)
def test_floatComplex(self):
assert issubclass(self.array['floatComplex'].dtype.type,
np.complex64)
assert_array_equal(self.array['floatComplex'],
[np.nan+0j, 0+0j, 0+-1j, np.nan+0j, np.nan+0j])
assert_array_equal(self.mask['floatComplex'],
[True, False, False, True, True])
def test_doubleComplex(self):
assert issubclass(self.array['doubleComplex'].dtype.type,
np.complex128)
assert_array_equal(
self.array['doubleComplex'],
[np.nan+0j, 0+0j, 0+-1j, np.nan+(np.inf*1j), np.nan+0j])
assert_array_equal(self.mask['doubleComplex'],
[True, False, False, True, True])
def test_doubleComplexArray(self):
assert issubclass(self.array['doubleComplexArray'].dtype.type,
np.object_)
assert ([len(x) for x in self.array['doubleComplexArray']] ==
[0, 2, 2, 0, 0])
def test_boolean(self):
assert issubclass(self.array['boolean'].dtype.type,
np.bool_)
assert_array_equal(self.array['boolean'],
[True, False, True, False, False])
def test_boolean_mask(self):
assert_array_equal(self.mask['boolean'],
[False, False, False, False, True])
def test_boolean_array(self):
assert issubclass(self.array['booleanArray'].dtype.type,
np.bool_)
assert_array_equal(self.array['booleanArray'],
[[True, True, True, True],
[True, True, False, True],
[True, True, False, True],
[False, False, False, False],
[False, False, False, False]])
def test_boolean_array_mask(self):
assert_array_equal(self.mask['booleanArray'],
[[False, False, False, False],
[False, False, False, False],
[False, False, True, False],
[True, True, True, True],
[True, True, True, True]])
def test_nulls(self):
assert_array_equal(self.array['nulls'],
[0, -9, 2, -9, -9])
assert_array_equal(self.mask['nulls'],
[False, True, False, True, True])
def test_nulls_array(self):
assert_array_equal(self.array['nulls_array'],
[[[-9, -9], [-9, -9]],
[[0, 1], [2, 3]],
[[-9, 0], [-9, 1]],
[[0, -9], [1, -9]],
[[-9, -9], [-9, -9]]])
assert_array_equal(self.mask['nulls_array'],
[[[True, True],
[True, True]],
[[False, False],
[False, False]],
[[True, False],
[True, False]],
[[False, True],
[False, True]],
[[True, True],
[True, True]]])
def test_double_array(self):
assert issubclass(self.array['doublearray'].dtype.type,
np.object_)
assert len(self.array['doublearray'][0]) == 0
assert_array_equal(self.array['doublearray'][1],
[0, 1, np.inf, -np.inf, np.nan, 0, -1])
assert_array_equal(self.array.data['doublearray'][1].mask,
[False, False, False, False, False, False, True])
def test_bit_array2(self):
assert_array_equal(self.array['bitarray2'][0],
[True, True, True, True,
False, False, False, False,
True, True, True, True,
False, False, False, False])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'][0])
assert np.all(self.mask['bitarray2'][1:])
def test_get_coosys_by_id(self):
coosys = self.votable.get_coosys_by_id('J2000')
assert coosys.system == 'eq_FK5'
def test_get_field_by_utype(self):
fields = list(self.votable.get_fields_by_utype("myint"))
assert fields[0].name == "int"
assert fields[0].values.min == -1000
def test_get_info_by_id(self):
info = self.votable.get_info_by_id('QUERY_STATUS')
assert info.value == 'OK'
if self.votable.version != '1.1':
info = self.votable.get_info_by_id("ErrorInfo")
assert info.value == "One might expect to find some INFO here, too..." # noqa
def test_repr(self):
assert '3 tables' in repr(self.votable)
assert repr(list(self.votable.iter_fields_and_params())[0]) == \
'<PARAM ID="awesome" arraysize="*" datatype="float" name="INPUT" unit="deg" value="[0.0 0.0]"/>' # noqa
# Smoke test
repr(list(self.votable.iter_groups()))
# Resource
assert repr(self.votable.resources) == '[</>]'
class TestThroughTableData(TestParse):
def setup_class(self):
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
self.xmlout = bio = io.BytesIO()
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio, pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_bit_mask(self):
assert_array_equal(self.mask['bit'],
[False, False, False, False, False])
def test_bitarray_mask(self):
assert not np.any(self.mask['bitarray'])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'])
def test_schema(self, tmpdir):
# have to use an actual file because assert_validate_schema only works
# on filenames, not file-like objects
fn = str(tmpdir.join("test_through_tabledata.xml"))
with open(fn, 'wb') as f:
f.write(self.xmlout.getvalue())
assert_validate_schema(fn, '1.1')
class TestThroughBinary(TestParse):
def setup_class(self):
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
votable.get_first_table().format = 'binary'
self.xmlout = bio = io.BytesIO()
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio, pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
# Masked values in bit fields don't roundtrip through the binary
# representation -- that's not a bug, just a limitation, so
# override the mask array checks here.
def test_bit_mask(self):
assert not np.any(self.mask['bit'])
def test_bitarray_mask(self):
assert not np.any(self.mask['bitarray'])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'])
class TestThroughBinary2(TestParse):
def setup_class(self):
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
votable.version = '1.3'
votable.get_first_table()._config['version_1_3_or_later'] = True
votable.get_first_table().format = 'binary2'
self.xmlout = bio = io.BytesIO()
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio, pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_get_coosys_by_id(self):
# No COOSYS in VOTable 1.2 or later
pass
def table_from_scratch():
from ..tree import VOTableFile, Resource, Table, Field
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
Field(votable, ID="filename", datatype="char"),
Field(votable, ID="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
out = io.StringIO()
votable.to_xml(out)
def test_open_files():
for filename in get_pkg_data_filenames('data', pattern='*.xml'):
if filename.endswith('custom_datatype.xml'):
continue
parse(filename, pedantic=False)
@raises(VOTableSpecError)
def test_too_many_columns():
parse(
get_pkg_data_filename('data/too_many_columns.xml.gz'),
pedantic=False)
def test_build_from_scratch(tmpdir):
# Create a new VOTable file...
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
tree.Field(votable, ID="filename", datatype="char"),
tree.Field(votable, ID="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
votable.to_xml(str(tmpdir.join("new_votable.xml")))
votable = parse(str(tmpdir.join("new_votable.xml")))
table = votable.get_first_table()
assert_array_equal(
table.array.mask, np.array([(False, [[False, False], [False, False]]),
(False, [[False, False], [False, False]])],
dtype=[(str('filename'), str('?')),
(str('matrix'), str('?'), (2, 2))]))
def test_validate(test_path_object=False):
"""
    test_path_object is used by ``test_validate_path_object`` below so that
    the file can also be passed as a pathlib.Path object.
"""
output = io.StringIO()
fpath = get_pkg_data_filename('data/regression.xml')
if test_path_object:
fpath = pathlib.Path(fpath)
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
with catch_warnings():
result = validate(fpath,
output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('validation.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(
get_pkg_data_filename('data/validation.txt'),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
@mock.patch('subprocess.Popen')
def test_validate_xmllint_true(mock_subproc_popen):
process_mock = mock.Mock()
attrs = {'communicate.return_value': ('ok', 'ko'),
'returncode': 0}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
assert validate(get_pkg_data_filename('data/empty_table.xml'),
xmllint=True)
def test_validate_path_object():
"""
Validating when source is passed as path object. (#4412)
"""
test_validate(test_path_object=True)
def test_gzip_filehandles(tmpdir):
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
with open(str(tmpdir.join("regression.compressed.xml")), 'wb') as fd:
votable.to_xml(
fd,
compressed=True,
_astropy_version="testing")
with open(str(tmpdir.join("regression.compressed.xml")), 'rb') as fd:
votable = parse(
fd,
pedantic=False)
def test_from_scratch_example():
with catch_warnings(VOWarning) as warning_lines:
try:
_run_test_from_scratch_example()
except ValueError as e:
warning_lines.append(str(e))
assert len(warning_lines) == 0
def _run_test_from_scratch_example():
from ..tree import VOTableFile, Resource, Table, Field
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
Field(votable, name="filename", datatype="char", arraysize="*"),
Field(votable, name="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
assert table.array[0][0] == 'test1.xml'
def test_fileobj():
# Assert that what we get back is a raw C file pointer
# so it will be super fast in the C extension.
from ....utils.xml import iterparser
filename = get_pkg_data_filename('data/regression.xml')
with iterparser._convert_to_fd_or_read_function(filename) as fd:
if sys.platform == 'win32':
fd()
else:
assert isinstance(fd, io.FileIO)
def test_nonstandard_units():
from .... import units as u
votable = parse(
get_pkg_data_filename('data/nonstandard_units.xml'),
pedantic=False)
assert isinstance(
votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
votable = parse(
get_pkg_data_filename('data/nonstandard_units.xml'),
pedantic=False,
unit_format='generic')
assert not isinstance(
votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
def test_resource_structure():
# Based on issue #1223, as reported by @astro-friedel and @RayPlante
from astropy.io.votable import tree as vot
vtf = vot.VOTableFile()
r1 = vot.Resource()
vtf.resources.append(r1)
t1 = vot.Table(vtf)
t1.name = "t1"
t2 = vot.Table(vtf)
t2.name = 't2'
r1.tables.append(t1)
r1.tables.append(t2)
r2 = vot.Resource()
vtf.resources.append(r2)
t3 = vot.Table(vtf)
t3.name = "t3"
t4 = vot.Table(vtf)
t4.name = "t4"
r2.tables.append(t3)
r2.tables.append(t4)
r3 = vot.Resource()
vtf.resources.append(r3)
t5 = vot.Table(vtf)
t5.name = "t5"
t6 = vot.Table(vtf)
t6.name = "t6"
r3.tables.append(t5)
r3.tables.append(t6)
buff = io.BytesIO()
vtf.to_xml(buff)
buff.seek(0)
vtf2 = parse(buff)
assert len(vtf2.resources) == 3
    for res in vtf2.resources:
assert len(res.tables) == 2
assert len(res.resources) == 0
def test_no_resource_check():
output = io.StringIO()
with catch_warnings():
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(get_pkg_data_filename('data/no_resource.xml'),
output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('no_resource.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(
get_pkg_data_filename('data/no_resource.txt'),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
def test_instantiate_vowarning():
# This used to raise a deprecation exception.
# See https://github.com/astropy/astroquery/pull/276
VOWarning(())
def test_custom_datatype():
votable = parse(
get_pkg_data_filename('data/custom_datatype.xml'),
pedantic=False,
datatype_mapping={'bar': 'int'}
)
table = votable.get_first_table()
assert table.array.dtype['foo'] == np.int32
|
054f0fd151ec7c2836b931726ed7fd6be5b979374fdf7e6e3668e2027b3a12b8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the conversion to/from astropy.table
"""
import io
import os
import pathlib
import numpy as np
from ....utils.data import get_pkg_data_filename, get_pkg_data_fileobj
from ..table import parse, writeto
from .. import tree
def test_table(tmpdir):
# Read the VOTABLE
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
table = votable.get_first_table()
astropy_table = table.to_table()
for name in table.array.dtype.names:
assert np.all(astropy_table.mask[name] == table.array.mask[name])
votable2 = tree.VOTableFile.from_table(astropy_table)
t = votable2.get_first_table()
field_types = [
('string_test', {'datatype': 'char', 'arraysize': '*'}),
('string_test_2', {'datatype': 'char', 'arraysize': '10'}),
('unicode_test', {'datatype': 'unicodeChar', 'arraysize': '*'}),
('fixed_unicode_test', {'datatype': 'unicodeChar', 'arraysize': '10'}),
('string_array_test', {'datatype': 'char', 'arraysize': '4'}),
('unsignedByte', {'datatype': 'unsignedByte'}),
('short', {'datatype': 'short'}),
('int', {'datatype': 'int'}),
('long', {'datatype': 'long'}),
('double', {'datatype': 'double'}),
('float', {'datatype': 'float'}),
('array', {'datatype': 'long', 'arraysize': '2*'}),
('bit', {'datatype': 'bit'}),
('bitarray', {'datatype': 'bit', 'arraysize': '3x2'}),
('bitvararray', {'datatype': 'bit', 'arraysize': '*'}),
('bitvararray2', {'datatype': 'bit', 'arraysize': '3x2*'}),
('floatComplex', {'datatype': 'floatComplex'}),
('doubleComplex', {'datatype': 'doubleComplex'}),
('doubleComplexArray', {'datatype': 'doubleComplex', 'arraysize': '*'}),
('doubleComplexArrayFixed', {'datatype': 'doubleComplex', 'arraysize': '2'}),
('boolean', {'datatype': 'bit'}),
('booleanArray', {'datatype': 'bit', 'arraysize': '4'}),
('nulls', {'datatype': 'int'}),
('nulls_array', {'datatype': 'int', 'arraysize': '2x2'}),
('precision1', {'datatype': 'double'}),
('precision2', {'datatype': 'double'}),
('doublearray', {'datatype': 'double', 'arraysize': '*'}),
('bitarray2', {'datatype': 'bit', 'arraysize': '16'})]
for field, type in zip(t.fields, field_types):
name, d = type
assert field.ID == name
assert field.datatype == d['datatype']
if 'arraysize' in d:
assert field.arraysize == d['arraysize']
writeto(votable2, os.path.join(str(tmpdir), "through_table.xml"))
def test_read_through_table_interface(tmpdir):
from ....table import Table
with get_pkg_data_fileobj('data/regression.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable', table_id='main_table')
assert len(t) == 5
fn = os.path.join(str(tmpdir), "table_interface.xml")
t.write(fn, table_id='FOO', format='votable')
with open(fn, 'rb') as fd:
t2 = Table.read(fd, format='votable', table_id='FOO')
assert len(t2) == 5
def test_read_through_table_interface2():
from ....table import Table
with get_pkg_data_fileobj('data/regression.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable', table_id='last_table')
assert len(t) == 0
def test_names_over_ids():
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=True)
assert table.colnames == [
'Name', 'GLON', 'GLAT', 'RAdeg', 'DEdeg', 'Jmag', 'Hmag', 'Kmag',
'G3.6mag', 'G4.5mag', 'G5.8mag', 'G8.0mag', '4.5mag', '8.0mag',
'Emag', '24mag', 'f_Name']
def test_explicit_ids():
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=False)
assert table.colnames == [
'col1', 'col2', 'col3', 'col4', 'col5', 'col6', 'col7', 'col8', 'col9',
'col10', 'col11', 'col12', 'col13', 'col14', 'col15', 'col16', 'col17']
def test_table_read_with_unnamed_tables():
"""
Issue #927
"""
from ....table import Table
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable')
assert len(t) == 1
def test_votable_path_object():
"""
Testing when votable is passed as pathlib.Path object #4412.
"""
fpath = pathlib.Path(get_pkg_data_filename('data/names.xml'))
table = parse(fpath).get_first_table().to_table()
assert len(table) == 1
assert int(table[0][3]) == 266
def test_from_table_without_mask():
from ....table import Table, Column
t = Table()
c = Column(data=[1, 2, 3], name='a')
t.add_column(c)
output = io.BytesIO()
t.write(output, format='votable')
def test_write_with_format():
from ....table import Table, Column
t = Table()
c = Column(data=[1, 2, 3], name='a')
t.add_column(c)
output = io.BytesIO()
t.write(output, format='votable', tabledata_format="binary")
obuff = output.getvalue()
assert b'VOTABLE version="1.3"' in obuff
assert b'BINARY' in obuff
assert b'TABLEDATA' not in obuff
output = io.BytesIO()
t.write(output, format='votable', tabledata_format="binary2")
obuff = output.getvalue()
assert b'VOTABLE version="1.3"' in obuff
assert b'BINARY2' in obuff
assert b'TABLEDATA' not in obuff
def test_empty_table():
votable = parse(
get_pkg_data_filename('data/empty_table.xml'),
pedantic=False)
table = votable.get_first_table()
astropy_table = table.to_table() # noqa
|
0107ddb8694d5b20ae5a2a5ec583d9df0f7200812dcc3cb52bc08d1d48a28ea2 | """
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed, it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]``. The presence of
that section, and the options therein, determines the next step taken: If it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken and by default the system-installed version
of astropy-helpers will be used (however, ``ah_bootstrap.use_astropy_helpers``
may be called manually from within the setup.py script).
This behavior can also be controlled using the ``--auto-use`` and
``--no-auto-use`` command-line flags. For clarity, an alias for
``--no-auto-use`` is ``--use-system-astropy-helpers``, and we recommend using
the latter if needed.
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import io
import locale
import os
import re
import subprocess as sp
import sys
__minimum_python_version__ = (3, 5)
if sys.version_info < __minimum_python_version__:
print("ERROR: Python {} or later is required by astropy-helpers".format(
__minimum_python_version__))
sys.exit(1)
try:
from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
from configparser import ConfigParser, RawConfigParser
_str_types = (str, bytes)
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Check that setuptools 1.0 or later is present
from distutils.version import LooseVersion
try:
import setuptools
assert LooseVersion(setuptools.__version__) >= LooseVersion('1.0')
except (ImportError, AssertionError):
print("ERROR: setuptools 1.0 or later is required by astropy-helpers")
sys.exit(1)
# typing as a dependency for 1.6.1+ Sphinx causes issues when imported after
# initializing submodule with ah_bootstrap.py
# See discussion and references in
# https://github.com/astropy/astropy-helpers/issues/302
try:
import typing # noqa
except ImportError:
pass
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat # noqa
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
    # Ignore if this fails for *any* reason
pass
# End compatibility imports...
# In case it didn't successfully import before the ez_setup checks
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
from distutils import log
from distutils.debug import DEBUG
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
UPPER_VERSION_EXCLUSIVE = None
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
def __init__(self, path=None, index_url=None, use_git=None, offline=None,
download_if_needed=None, auto_upgrade=None):
if path is None:
path = PACKAGE_NAME
if not (isinstance(path, _str_types) or path is False):
raise TypeError('path must be a string or False')
if not isinstance(path, str):
fs_encoding = sys.getfilesystemencoding()
path = path.decode(fs_encoding) # path to unicode
self.path = path
# Set other option attributes, using defaults where necessary
self.index_url = index_url if index_url is not None else INDEX_URL
self.offline = offline if offline is not None else OFFLINE
# If offline=True, override download and auto-upgrade
if self.offline:
download_if_needed = False
auto_upgrade = False
self.download = (download_if_needed
if download_if_needed is not None
else DOWNLOAD_IF_NEEDED)
self.auto_upgrade = (auto_upgrade
if auto_upgrade is not None else AUTO_UPGRADE)
# If this is a release then the .git directory will not exist so we
# should not use git.
git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
if use_git is None and not git_dir_exists:
use_git = False
self.use_git = use_git if use_git is not None else USE_GIT
# Declared as False by default--later we check if astropy-helpers can be
# upgraded from PyPI, but only if not using a source distribution (as in
# the case of import from a git submodule)
self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
try:
cfg.read('setup.cfg')
except Exception as e:
if DEBUG:
raise
log.error(
"Error reading setup.cfg: {0!r}\n{1} will not be "
"automatically bootstrapped and package installation may fail."
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
return {}
if not cfg.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not cfg.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = cfg.getboolean('ah_bootstrap', option)
else:
value = cfg.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
        # of the same name then we will break that. However there's a catch-22
        # here in that we can't just do full argument parsing right here, because
        # we don't yet know *how* to parse all possible command-line arguments.
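        # For example (illustrative): with
        #     argv = ['setup.py', '--offline', '--no-git', 'build']
        # this method returns {'offline': True, 'use_git': False} and leaves
        # argv == ['setup.py', 'build'] for distutils to parse later.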
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
if '--auto-use' in argv:
config['auto_use'] = True
argv.remove('--auto-use')
if '--no-auto-use' in argv:
config['auto_use'] = False
argv.remove('--no-auto-use')
if '--use-system-astropy-helpers' in argv:
config['auto_use'] = False
argv.remove('--use-system-astropy-helpers')
return config
def run(self):
strategies = ['local_directory', 'local_file', 'index']
dist = None
# First, remove any previously imported versions of astropy_helpers;
# this is necessary for nested installs where one package's installer
# is installing another package via setuptools.sandbox.run_setup, as in
# the case of setup_requires
for key in list(sys.modules):
try:
if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
del sys.modules[key]
except AttributeError:
# Sometimes mysterious non-string things can turn up in
# sys.modules
continue
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
for strategy in strategies:
method = getattr(self, 'get_{0}_dist'.format(strategy))
dist = method()
if dist is not None:
break
else:
raise _AHBootstrapSystemExit(
"No source found for the {0!r} package; {0} must be "
"available and importable as a prerequisite to building "
"or installing this package.".format(PACKAGE_NAME))
# This is a bit hacky, but if astropy_helpers was loaded from a
# directory/submodule its Distribution object gets a "precedence" of
# "DEVELOP_DIST". However, in other cases it gets a precedence of
# "EGG_DIST". However, when activing the distribution it will only be
# placed early on sys.path if it is treated as an EGG_DIST, so always
# do that
dist = dist.clone(precedence=pkg_resources.EGG_DIST)
# Otherwise we found a version of astropy-helpers, so we're done
        # Just activate the found distribution on sys.path--if we did a
# download this usually happens automatically but it doesn't hurt to
# do it again
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
# Some (much) older versions of setuptools do not have the
# replace=True option here. These versions are old enough that all
# bets may be off anyways, but it's easy enough to work around just
# in case...
if dist.key in pkg_resources.working_set.by_key:
del pkg_resources.working_set.by_key[dist.key]
pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
def get_local_directory_dist(self):
"""
Handle importing a vendored package from a subdirectory of the source
distribution.
"""
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_local_file_dist(self):
"""
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
"""
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return None
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
def _directory_import(self):
"""
Import astropy_helpers from the given path, which will be added to
sys.path.
        Returns the ``Distribution`` object for astropy_helpers if it was
        found, and None otherwise.
        """
path = os.path.abspath(self.path)
        # Use an empty WorkingSet rather than the main
# pkg_resources.working_set, since on older versions of setuptools this
# will invoke a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
# We use subprocess instead of run_setup from setuptools to
# avoid segmentation faults - see the following for more details:
# https://github.com/cython/cython/issues/2104
sp.check_output([sys.executable, 'setup.py', 'egg_info'], cwd=path)
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist
def _do_download(self, version='', find_links=None):
if find_links:
allow_hosts = ''
index_url = None
else:
allow_hosts = None
index_url = self.index_url
# Annoyingly, setuptools will not handle other arguments to
# Distribution (such as options) before handling setup_requires, so it
# is not straightforward to programmatically augment the arguments which
# are passed to easy_install
class _Distribution(Distribution):
def get_option_dict(self, command_name):
opts = Distribution.get_option_dict(self, command_name)
if command_name == 'easy_install':
if find_links is not None:
opts['find_links'] = ('setup script', find_links)
if index_url is not None:
opts['index_url'] = ('setup script', index_url)
if allow_hosts is not None:
opts['allow_hosts'] = ('setup script', allow_hosts)
return opts
if version:
req = '{0}=={1}'.format(DIST_NAME, version)
else:
if UPPER_VERSION_EXCLUSIVE is None:
req = DIST_NAME
else:
req = '{0}<{1}'.format(DIST_NAME, UPPER_VERSION_EXCLUSIVE)
attrs = {'setup_requires': [req]}
# NOTE: we need to parse the config file (e.g. setup.cfg) to make sure
# it honours the options set in the [easy_install] section, and we need
# to explicitly fetch the requirement eggs as setup_requires does not
# get honored in recent versions of setuptools:
# https://github.com/pypa/setuptools/issues/1273
try:
context = _verbose if DEBUG else _silence
with context():
dist = _Distribution(attrs=attrs)
try:
dist.parse_config_files(ignore_option_errors=True)
dist.fetch_build_eggs(req)
except TypeError:
# On older versions of setuptools, ignore_option_errors
# doesn't exist, and the above two lines are not needed
# so we can just continue
pass
# If the setup_requires succeeded it will have added the new dist to
# the main working_set
return pkg_resources.working_set.by_key.get(DIST_NAME)
except Exception as e:
if DEBUG:
raise
msg = 'Error retrieving {0} from {1}:\n{2}'
if find_links:
source = find_links[0]
elif index_url != INDEX_URL:
source = index_url
else:
source = 'PyPI'
raise Exception(msg.format(DIST_NAME, source, repr(e)))
def _do_upgrade(self, dist):
# Build up a requirement for a higher bugfix release but a lower minor
# release (so API compatibility is guaranteed)
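        # For example (illustrative, assuming _next_version bumps the minor
        # version): if the installed dist is at version 2.0.6, the requirement
        # becomes 'astropy-helpers>2.0.6,<2.1.0', so only bugfix releases in
        # the same minor series are considered.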
next_version = _next_version(dist.parsed_version)
req = pkg_resources.Requirement.parse(
'{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
package_index = PackageIndex(index_url=self.index_url)
upgrade = package_index.obtain(req)
if upgrade is not None:
return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
        ``_check_submodule_no_git`` fallback uses pure Python to check if the given
path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
            # This is a warning that comes from perl (via running git
            # submodule) and only occurs with a malformatted locale setting,
            # which can happen sometimes on OSX. See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
            if not stderr.endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
        # 1. Status indicator: '-' if the submodule is uninitialized, '+' if
        # the submodule is initialized but not at the commit currently
        # recorded in .gitmodules (and thus needs to be updated), or 'U' if
        # the submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
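        #
        # For example (illustrative), the status line for an up-to-date
        # submodule looks like
        #
        #    1e0bbd2a6f2fcb22b18cba29cf2b8a93ab673926 astropy_helpers (v2.0.6)
        #
        # (with a leading space for the status), while an uninitialized one
        # starts with '-' and omits the `git describe` part.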
        _git_submodule_status_re = re.compile(
            r'^(?P<status>[-+U ])(?P<commit>[0-9a-f]{40}) '
            r'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
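# A .gitmodules entry typically looks like this (a hypothetical example):
#
# [submodule "astropy_helpers"]
#     path = astropy_helpers
#     url = https://github.com/astropy/astropy-helpers.git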
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
if line and line[0] in ('#', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
raise _AHBootstrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
"""
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
"""
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
# XXX: May block if either stdout or stderr fill their buffers;
# however for the commands this is currently used for that is
# unlikely (they should have very brief output)
stdout, stderr = p.communicate()
except OSError as e:
if DEBUG:
raise
if e.errno == errno.ENOENT:
msg = 'Command not found: `{0}`'.format(' '.join(cmd))
raise _CommandNotFound(msg, cmd)
else:
raise _AHBootstrapSystemExit(
'An unexpected error occurred when running the '
'`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
# Can fail if the default locale is not configured properly. See
# https://github.com/astropy/astropy/issues/2749. For the purposes under
# consideration 'latin1' is an acceptable fallback.
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
except ValueError:
# Due to an OSX oddity locale.getdefaultlocale() can also crash
# depending on the user's locale/language settings. See:
# http://bugs.python.org/issue18378
stdio_encoding = 'latin1'
# Unlikely to fail at this point but even then let's be flexible
if not isinstance(stdout, str):
stdout = stdout.decode(stdio_encoding, 'replace')
if not isinstance(stderr, str):
stderr = stderr.decode(stdio_encoding, 'replace')
return (p.returncode, stdout, stderr)
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _verbose():
yield
@contextlib.contextmanager
def _silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
def __init__(self, *args):
if not args:
msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
else:
msg = args[0]
msg += '\n' + _err_help_msg
super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
If `True`, disable all actions that require an internet connection,
including downloading packages from the package index and fetching
updates to any git submodule. Defaults to `False`.
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run()
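# A typical invocation from a project's setup.py looks like the following
# (a sketch; the keyword values depend on the project layout):
#
#     from ah_bootstrap import use_astropy_helpers
#     use_astropy_helpers(path='astropy_helpers', download_if_needed=True)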
|
029bc732ec918bfa2e82e1dd18aa8e9ece82cc5cb5ccf77a60509d4cd9a8623b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astropy is a package intended to contain core functionality and some
common tools needed for performing astronomy and astrophysics research with
Python. It also provides an index for other astronomy packages and tools for
managing them.
"""
import sys
import os
from warnings import warn
__minimum_python_version__ = '3.5'
__minimum_numpy_version__ = '1.13.0'
class UnsupportedPythonError(Exception):
pass
# This is the same check as the one at the top of setup.py
if sys.version_info < tuple((int(val) for val in __minimum_python_version__.split('.'))):
raise UnsupportedPythonError("Astropy does not support Python < {}".format(__minimum_python_version__))
def _is_astropy_source(path=None):
"""
Returns whether the source for this module is directly in an astropy
source distribution or checkout.
"""
# If this __init__.py file is in ./astropy/ then the import is within a
# source dir. .astropy-root is a file distributed with the source, but it
# should not be installed
if path is None:
path = os.path.join(os.path.dirname(__file__), os.pardir)
elif os.path.isfile(path):
path = os.path.dirname(path)
source_dir = os.path.abspath(path)
return os.path.exists(os.path.join(source_dir, '.astropy-root'))
def _is_astropy_setup():
"""
Returns whether we are currently being imported in the context of running
Astropy's setup.py.
"""
main_mod = sys.modules.get('__main__')
if not main_mod:
return False
return (getattr(main_mod, '__file__', False) and
os.path.basename(main_mod.__file__).rstrip('co') == 'setup.py' and
_is_astropy_source(main_mod.__file__))
# this indicates whether or not we are in astropy's setup.py
try:
_ASTROPY_SETUP_
except NameError:
import builtins
# This will set _ASTROPY_SETUP_ to True by default if
# we are running Astropy's setup.py
builtins._ASTROPY_SETUP_ = _is_astropy_setup()
try:
from .version import version as __version__
except ImportError:
# TODO: Issue a warning using the logging framework
__version__ = ''
try:
from .version import githash as __githash__
except ImportError:
# TODO: Issue a warning using the logging framework
__githash__ = ''
# The location of the online documentation for astropy
# This location will normally point to the current released version of astropy
if 'dev' in __version__:
online_docs_root = 'http://docs.astropy.org/en/latest/'
else:
online_docs_root = 'http://docs.astropy.org/en/{0}/'.format(__version__)
def _check_numpy():
"""
Check that Numpy is installed and it is of the minimum version we
require.
"""
# Note: We could have used distutils.version for this comparison,
# but it seems like overkill to import distutils at runtime.
requirement_met = False
try:
import numpy
except ImportError:
pass
else:
from .utils import minversion
requirement_met = minversion(numpy, __minimum_numpy_version__)
if not requirement_met:
msg = ("Numpy version {0} or later must be installed to use "
"Astropy".format(__minimum_numpy_version__))
raise ImportError(msg)
return numpy
if not _ASTROPY_SETUP_:
_check_numpy()
from . import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy`.
"""
unicode_output = _config.ConfigItem(
False,
'When True, use Unicode characters when outputting values, and '
'displaying widgets at the console.')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when writing to the console.',
aliases=['astropy.utils.console.USE_COLOR', 'astropy.logger.USE_COLOR'])
max_lines = _config.ConfigItem(
None,
description='Maximum number of lines in the display of pretty-printed '
'objects. If not provided, try to determine automatically from the '
'terminal size. Negative numbers mean no limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_lines'])
max_width = _config.ConfigItem(
None,
description='Maximum number of characters per line in the display of '
'pretty-printed objects. If not provided, try to determine '
'automatically from the terminal size. Negative numbers mean no '
'limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_width'])
conf = Conf()
# Create the test() function
from .tests.runner import TestRunner
test = TestRunner.make_test_runner_in(__path__[0])
# if we are *not* in setup mode, import the logger and possibly populate the
# configuration file with the defaults
def _initialize_astropy():
from . import config
def _rollback_import(message):
log.error(message)
# Now disable exception logging to avoid an annoying error in the
# exception logger before we raise the import error:
_teardown_log()
# Roll back any astropy sub-modules that have been imported thus
# far
for key in list(sys.modules):
if key.startswith('astropy.'):
del sys.modules[key]
raise ImportError('astropy')
try:
from .utils import _compiler
except ImportError:
if _is_astropy_source():
log.warning('You appear to be trying to import astropy from '
'within a source checkout without building the '
'extension modules first. Attempting to (re)build '
'extension modules:')
try:
_rebuild_extensions()
except BaseException as exc:
_rollback_import(
'An error occurred while attempting to rebuild the '
'extension modules. Please try manually running '
'`./setup.py develop` or `./setup.py build_ext '
'--inplace` to see what the issue was. Extension '
'modules must be successfully compiled and importable '
'in order to import astropy.')
# Reraise the Exception only in case it wasn't an Exception,
# for example if a "SystemExit" or "KeyboardInterrupt" was
# invoked.
if not isinstance(exc, Exception):
raise
else:
# Outright broken installation; don't be nice.
raise
# add these here so we only need to cleanup the namespace at the end
config_dir = os.path.dirname(__file__)
try:
config.configuration.update_default_config(__package__, config_dir)
except config.configuration.ConfigurationDefaultMissingError as e:
wmsg = (e.args[0] + " Cannot install default profile. If you are "
"importing from source, this is expected.")
warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg))
def _rebuild_extensions():
global __version__
global __githash__
import subprocess
import time
from .utils.console import Spinner
devnull = open(os.devnull, 'w')
old_cwd = os.getcwd()
os.chdir(os.path.join(os.path.dirname(__file__), os.pardir))
try:
sp = subprocess.Popen([sys.executable, 'setup.py', 'build_ext',
'--inplace'], stdout=devnull,
stderr=devnull)
with Spinner('Rebuilding extension modules') as spinner:
while sp.poll() is None:
next(spinner)
time.sleep(0.05)
finally:
os.chdir(old_cwd)
devnull.close()
if sp.returncode != 0:
raise OSError('Running setup.py build_ext --inplace failed '
'with error code {0}: try rerunning this command '
'manually to check what the error was.'.format(
sp.returncode))
# Try re-loading module-level globals from the astropy.version module,
# which may not have existed before this function ran
try:
from .version import version as __version__
except ImportError:
pass
try:
from .version import githash as __githash__
except ImportError:
pass
# Set the bibtex entry to the article referenced in CITATION
def _get_bibtex():
import re
if os.path.exists('CITATION'):
with open('CITATION', 'r') as citation:
refs = re.findall(r'\{[^()]*\}', citation.read())
if len(refs) == 0: return ''
bibtexreference = "@ARTICLE{0}".format(refs[0])
return bibtexreference
else:
return ''
__bibtex__ = _get_bibtex()
import logging
# Use the root logger as a dummy log before initializing Astropy's logger
log = logging.getLogger()
if not _ASTROPY_SETUP_:
from .logger import _init_log, _teardown_log
log = _init_log()
_initialize_astropy()
from .utils.misc import find_api_page
def online_help(query):
"""
Search the online Astropy documentation for the given query.
Opens the results in the default web browser. Requires an active
Internet connection.
Parameters
----------
query : str
The search query.
"""
from urllib.parse import urlencode
import webbrowser
version = __version__
if 'dev' in version:
version = 'latest'
else:
version = 'v' + version
url = 'http://docs.astropy.org/en/{0}/search.html?{1}'.format(
version, urlencode({'q': query}))
webbrowser.open(url)
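# For example (not run on import; requires network access and a browser):
#
#     online_help('sigma clipping')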
__dir_inc__ = ['__version__', '__githash__', '__minimum_numpy_version__',
'__bibtex__', 'test', 'log', 'find_api_page', 'online_help',
'online_docs_root', 'conf']
from types import ModuleType as __module_type__
# Clean up top-level namespace--delete everything that isn't in __dir_inc__
# or is a magic attribute, and that isn't a submodule of this package
for varname in dir():
if not ((varname.startswith('__') and varname.endswith('__')) or
varname in __dir_inc__ or
(varname[0] != '_' and
isinstance(locals()[varname], __module_type__) and
locals()[varname].__name__.startswith(__name__ + '.'))):
# The last clause in the above disjunction deserves explanation:
# When using relative imports like ``from .. import config``, the
# ``config`` variable is automatically created in the namespace of
# whatever module ``..`` resolves to (in this case astropy). This
# happens a few times just in the module setup above. This allows
# the cleanup to keep any public submodules of the astropy package
del locals()[varname]
del varname, __module_type__
|
de9f007f6d5abd2414f0728ee6bb684c9fcfebb0b741bc9acacdf6472e1c580f | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_docs" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
from datetime import datetime
import os
import sys
import astropy
try:
from sphinx_astropy.conf.v1 import * # noqa
except ImportError:
print('ERROR: the documentation requires the sphinx-astropy package to be installed')
sys.exit(1)
plot_rcparams = {}
plot_rcparams['figure.figsize'] = (6, 6)
plot_rcparams['savefig.facecolor'] = 'none'
plot_rcparams['savefig.bbox'] = 'tight'
plot_rcparams['axes.labelsize'] = 'large'
plot_rcparams['figure.subplot.hspace'] = 0.5
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ['png', 'svg', 'pdf']
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.1'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
check_sphinx_version("1.2.1")
# The intersphinx_mapping in astropy_helpers.sphinx.conf refers to astropy for
# the benefit of affiliated packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping['astropy']
# add any custom intersphinx for astropy
intersphinx_mapping['pytest'] = ('https://docs.pytest.org/en/stable/', None)
intersphinx_mapping['ipython'] = ('http://ipython.readthedocs.io/en/stable/', None)
intersphinx_mapping['pandas'] = ('http://pandas.pydata.org/pandas-docs/stable/', None)
intersphinx_mapping['sphinx_automodapi'] = ('https://sphinx-automodapi.readthedocs.io/en/stable/', None)
intersphinx_mapping['packagetemplate'] = ('http://docs.astropy.org/projects/package-template/en/latest/', None)
intersphinx_mapping['h5py'] = ('http://docs.h5py.org/en/stable/', None)
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
exclude_patterns.append('_pkgtemplate.rst')
exclude_patterns.append('**/*.inc.rst') # .inc.rst files are *include* files; don't have sphinx process them
# Add any paths that contain templates here, relative to this directory.
if 'templates_path' not in locals(): # in case parent conf.py defines it
templates_path = []
templates_path.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
.. |minimum_numpy_version| replace:: {0.__minimum_numpy_version__}
.. Astropy
.. _Astropy: http://astropy.org
.. _`Astropy mailing list`: https://mail.python.org/mailman/listinfo/astropy
.. _`astropy-dev mailing list`: http://groups.google.com/group/astropy-dev
""".format(astropy)
# -- Project information ------------------------------------------------------
project = u'Astropy'
author = u'The Astropy Developers'
copyright = u'2011–{0}, '.format(datetime.utcnow().year) + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = astropy.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = astropy.__version__
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
#
# The global astropy configuration uses a custom theme,
# 'bootstrap-astropy', which is installed along with astropy. The
# theme has options for controlling the text of the logo in the upper
# left corner. This is how you would specify the options in order to
# override the theme defaults (The following options *are* the
# defaults, so we do not actually need to set them here.)
#html_theme_options = {
# 'logotext1': 'astro', # white, semi-bold
# 'logotext2': 'py', # orange, light
# 'logotext3': ':docs' # white, light
# }
# A different theme can be used, or other parts of this theme can be
# modified, by overriding some of the variables set in the global
# configuration. The variables set in the global configuration are
# listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
latex_logo = '_static/astropy_logo.pdf'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# -- Options for the edit_on_github extension ----------------------------------------
extensions += ['sphinx_astropy.ext.edit_on_github']
# Don't import the module as "version" or it will override the
# "version" configuration parameter
from astropy import version as versionmod
edit_on_github_project = "astropy/astropy"
if versionmod.release:
edit_on_github_branch = "v{0}.{1}.x".format(
versionmod.major, versionmod.minor)
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
edit_on_github_skip_regex = '_.*|api/.*'
github_issues_url = 'https://github.com/astropy/astropy/issues/'
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
nitpick_ignore = []
for line in open('nitpick-exceptions'):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
target = target.strip()
nitpick_ignore.append((dtype, target))
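# Each line of nitpick-exceptions pairs a reference type with a target,
# e.g. (a hypothetical entry):
#
#     py:class numpy.ndarray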
# -- Options for the Sphinx gallery -------------------------------------------
try:
import sphinx_gallery
extensions += ["sphinx_gallery.gen_gallery"]
sphinx_gallery_conf = {
'backreferences_dir': 'generated/modules', # path to store the module using example template
'filename_pattern': '^((?!skip_).)*$', # execute all examples except those that start with "skip_"
'examples_dirs': '..{}examples'.format(os.sep), # path to the examples scripts
'gallery_dirs': 'generated/examples', # path to save gallery generated examples
'reference_url': {
'astropy': None,
'matplotlib': 'http://matplotlib.org/',
'numpy': 'http://docs.scipy.org/doc/numpy/',
},
'abort_on_example_error': True
}
except ImportError:
def setup(app):
app.warn('The sphinx_gallery extension is not installed, so the '
'gallery will not be built. You will probably see '
'additional warnings about undefined references due '
'to this.')
linkcheck_anchors = False
|
60f64ab7ae5c3a8a312b30268538d85dc274fc06e660fb1f1c6d9979615a2b22 | # -*- coding: utf-8 -*-
"""
==========================================================
Create a new coordinate class (for the Sagittarius stream)
==========================================================
This document describes in detail how to subclass and define a custom spherical
coordinate frame, as discussed in :ref:`astropy-coordinates-design` and the
docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example, we
will define a coordinate system defined by the plane of orbit of the Sagittarius
Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003). The Sgr
coordinate system is often referred to in terms of two angular coordinates,
:math:`\Lambda,B`.
To do this, we need to define a subclass of
`~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the
coordinate system angles in each of the supported representations. In this case
we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and
"Beta". Then we have to define the transformation from this coordinate system to
some other built-in system. Here we will use Galactic coordinates, represented
by the `~astropy.coordinates.Galactic` class.
See Also
--------
* The `gala package <http://gala.adrian.pw/>`_, which defines a number of
Astropy coordinate frames for stellar stream coordinate systems.
* Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius
Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms",
https://arxiv.org/abs/astro-ph/0304198
* Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a
Triaxial Milky Way Halo", https://arxiv.org/abs/1003.1132
* David Law's Sgr info page http://www.stsci.edu/~dlaw/Sgr/
-------------------
*By: Adrian Price-Whelan, Erik Tollerud*
*License: BSD*
-------------------
"""
##############################################################################
# Set up numpy, matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the packages necessary for coordinates
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# The first step is to create a new class, which we'll call
# ``Sagittarius`` and make it a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`:
class Sagittarius(coord.BaseCoordinateFrame):
"""
A Heliocentric spherical coordinate system defined by the orbit
of the Sagittarius dwarf galaxy, as described in
http://adsabs.harvard.edu/abs/2003ApJ...599.1082M
and further explained in
http://www.stsci.edu/~dlaw/Sgr/.
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
Lambda : `Angle`, optional, must be keyword
The longitude-like angle corresponding to Sagittarius' orbit.
Beta : `Angle`, optional, must be keyword
The latitude-like angle corresponding to Sagittarius' orbit.
distance : `Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
pm_Lambda_cosBeta : :class:`~astropy.units.Quantity`, optional, must be keyword
The proper motion along the stream in ``Lambda`` (including the
``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given).
pm_Beta : :class:`~astropy.units.Quantity`, optional, must be keyword
The proper motion in ``Beta`` for this object (``pm_Lambda_cosBeta`` must
also be given).
radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
The radial velocity of this object.
"""
default_representation = coord.SphericalRepresentation
default_differential = coord.SphericalCosLatDifferential
frame_specific_representation_info = {
coord.SphericalRepresentation: [
coord.RepresentationMapping('lon', 'Lambda'),
coord.RepresentationMapping('lat', 'Beta'),
coord.RepresentationMapping('distance', 'distance')]
}
##############################################################################
# Breaking this down line-by-line, we define the class as a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive
# docstring. The final lines are class-level attributes that specify the
# default representation for the data, default differential for the velocity
# information, and mappings from the attribute names used by representation
# objects to the names that are to be used by the ``Sagittarius`` frame. In this
# case we override the names in the spherical representations but don't do
# anything with other representations like cartesian or cylindrical.
#
# Next we have to define the transformation from this coordinate system to some
# other built-in coordinate system; we will use Galactic coordinates. We can do
# this by defining functions that return transformation matrices, or by simply
# defining a function that accepts a coordinate and returns a new coordinate in
# the new system. Because the transformation to the Sagittarius coordinate
# system is just a spherical rotation from Galactic coordinates, we'll just
# define a function that returns this matrix. We'll start by constructing the
# transformation matrix using pre-determined Euler angles and the
# ``rotation_matrix`` helper function:
SGR_PHI = (180 + 3.75) * u.degree # Euler angles (from Law & Majewski 2010)
SGR_THETA = (90 - 13.46) * u.degree
SGR_PSI = (180 + 14.111534) * u.degree
# Generate the rotation matrix using the x-convention (see Goldstein)
D = rotation_matrix(SGR_PHI, "z")
C = rotation_matrix(SGR_THETA, "x")
B = rotation_matrix(SGR_PSI, "z")
A = np.diag([1.,1.,-1.])
SGR_MATRIX = matrix_product(A, B, C, D)
##############################################################################
# Since we already constructed the transformation (rotation) matrix above, and
# the inverse of a rotation matrix is just its transpose, the required
# transformation functions are very simple:
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius)
def galactic_to_sgr():
""" Compute the transformation matrix from Galactic spherical to
heliocentric Sgr coordinates.
"""
return SGR_MATRIX
##############################################################################
# The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform,
# coord.Galactic, Sagittarius)`` registers this function on the
# ``frame_transform_graph`` as a coordinate transformation. Inside the function,
# we simply return the previously defined rotation matrix.
#
# We then register the inverse transformation by using the transpose of the
# rotation matrix (which is faster to compute than the inverse):
@frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic)
def sgr_to_galactic():
""" Compute the transformation matrix from heliocentric Sgr coordinates to
spherical Galactic.
"""
return matrix_transpose(SGR_MATRIX)
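##############################################################################
# As an optional sanity check: the rotation matrix is orthogonal, so the
# product with its transpose should be the identity (a quick verification,
# not part of the original recipe):

assert np.allclose(SGR_MATRIX @ matrix_transpose(SGR_MATRIX), np.eye(3))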
##############################################################################
# Now that we've registered these transformations between ``Sagittarius`` and
# `~astropy.coordinates.Galactic`, we can transform between *any* coordinate
# system and ``Sagittarius`` (as long as the other system has a path to
# transform to `~astropy.coordinates.Galactic`). For example, to transform from
# ICRS coordinates to ``Sagittarius``, we would do:
icrs = coord.ICRS(280.161732*u.degree, 11.91934*u.degree)
sgr = icrs.transform_to(Sagittarius)
print(sgr)
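##############################################################################
# The resulting object exposes the remapped attribute names defined on the
# frame class above (a quick sanity check; the exact values depend on the
# rotation matrix):

print(sgr.Lambda.degree, sgr.Beta.degree)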
##############################################################################
# Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this
# case, a line along the ``Sagittarius`` x-y plane):
sgr = Sagittarius(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian)
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
##############################################################################
# As an example, we'll now plot the points in both coordinate systems:
fig, axes = plt.subplots(2, 1, figsize=(8, 10),
subplot_kw={'projection': 'aitoff'})
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian,
linestyle='none', marker='.')
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian,
linestyle='none', marker='.')
plt.show()
##############################################################################
# This particular transformation is just a spherical rotation, which is a
# special case of an Affine transformation with no vector offset. The
# transformation of velocity components is therefore natively supported as
# well:
sgr = Sagittarius(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian,
pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr,
pm_Beta=np.zeros(128)*u.mas/u.yr)
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.degree,
sgr.pm_Lambda_cosBeta.value,
linestyle='none', marker='.')
axes[0].set_xlabel(r"$\Lambda$ [deg]")
axes[0].set_ylabel(r"$\mu_\Lambda \, \cos B$ [{0}]"
.format(sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')))
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value,
linestyle='none', marker='.')
axes[1].set_ylabel(r"$\mu_\alpha \, \cos\delta$ [{0}]"
.format(icrs.pm_ra_cosdec.unit.to_string('latex_inline')))
axes[2].set_title("ICRS")
axes[2].plot(icrs.ra.degree, icrs.pm_dec.value,
linestyle='none', marker='.')
axes[2].set_xlabel("RA [deg]")
axes[2].set_ylabel(r"$\mu_\delta$ [{0}]"
.format(icrs.pm_dec.unit.to_string('latex_inline')))
plt.show()
|
bc86096363f56c068f5af6e9c6dcbc4a911e5830bd336dacd3388c34d1ccad86 | # -*- coding: utf-8 -*-
"""
==========================================
Create a very large FITS file from scratch
==========================================
This example demonstrates how to create a large file (larger than will fit in
memory) from scratch using `astropy.io.fits`.
-------------------
*By: Erik Bray*
*License: BSD*
-------------------
"""
##############################################################################
# Normally to create a single image FITS file one would do something like:
import os
import numpy as np
from astropy.io import fits
data = np.zeros((40000, 40000), dtype=np.float64)
hdu = fits.PrimaryHDU(data=data)
##############################################################################
# Then use the `astropy.io.fits.writeto()` method to write out the new
# file to disk
hdu.writeto('large.fits')
##############################################################################
# However, a 40000 x 40000 array of doubles is nearly twelve gigabytes! Most
# systems won't be able to create that in memory just to write out to disk.
# Creating such a large file efficiently requires a little extra work, and a
# few assumptions.
#
# First, it is helpful to anticipate about how large (as in, how many keywords)
# the header will have in it. FITS headers must be written in 2880 byte
# blocks; each card is 80 bytes, so a block holds 36 keywords (including
# the END keyword in the final block). Typical headers have somewhere
# between 1 and 4 blocks, though sometimes more.
#
# Since the first thing we write to a FITS file is the header, we want to write
# enough header blocks so that there is plenty of padding in which to add new
# keywords without having to resize the whole file. Say you want the header to
# use 4 blocks by default. Then, excluding the END card which Astropy will add
# automatically, create the header and pad it out to 36 * 4 cards.
#
# Create a stub array to initialize the HDU; its
# exact size is irrelevant, as long as it has the desired number of
# dimensions
data = np.zeros((100, 100), dtype=np.float64)
hdu = fits.PrimaryHDU(data=data)
header = hdu.header
while len(header) < (36 * 4 - 1):
header.append() # Adds a blank card to the end
##############################################################################
# Now adjust the NAXISn keywords to the desired size of the array, and write
# only the header out to a file. Using the ``hdu.writeto()`` method will cause
# astropy to "helpfully" reset the NAXISn keywords to match the size of the
# dummy array. That is because it works hard to ensure that only valid FITS
# files are written. Instead, we can write just the header to a file using the
# `astropy.io.fits.Header.tofile` method:
header['NAXIS1'] = 40000
header['NAXIS2'] = 40000
header.tofile('large.fits')
##############################################################################
# Finally, grow out the end of the file to match the length of the
# data (plus the length of the header). This can be done very efficiently on
# most systems by seeking past the end of the file and writing a single byte,
# like so:
with open('large.fits', 'rb+') as fobj:
# Seek past the length of the header, plus the length of the
# data we want to write.
# 8 is the number of bytes per value, i.e. abs(header['BITPIX'])/8
# (this example is assuming a 64-bit float)
# The -1 is to account for the final byte that we are about to
# write:
fobj.seek(len(header.tostring()) + (40000 * 40000 * 8) - 1)
fobj.write(b'\0')
##############################################################################
# More generally, this can be written:
shape = tuple(header['NAXIS{0}'.format(ii)] for ii in range(1, header['NAXIS']+1))
with open('large.fits', 'rb+') as fobj:
fobj.seek(len(header.tostring()) + (np.prod(shape) * np.abs(header['BITPIX']//8)) - 1)
fobj.write(b'\0')
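##############################################################################
# As an optional sanity check, the resulting file size should now equal the
# header plus the data:

expected = len(header.tostring()) + np.prod(shape) * np.abs(header['BITPIX'] // 8)
print(os.path.getsize('large.fits') == expected)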
##############################################################################
# On modern operating systems this will cause the file (past the header) to be
# filled with zeros out to the ~12GB needed to hold a 40000 x 40000 image. On
# filesystems that support sparse file creation (most Linux filesystems, but not
# the HFS+ filesystem used by most Macs) this is a very fast, efficient
# operation. On other systems your mileage may vary.
#
# This isn't the only way to build up a large file, but probably one of the
# safest. This method can also be used to create large multi-extension FITS
# files, with a little care.
##############################################################################
# Finally, we'll remove the file we created:
os.remove('large.fits')
|
31ef4a03a458ba713df3ac5af2e5fb796692615590d6e822eecc4aa01c5a98cc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from ..utils import isiterable
from ..utils.decorators import deprecated_renamed_argument
from ..utils.exceptions import AstropyUserWarning
try:
import bottleneck # pylint: disable=W0611
HAS_BOTTLENECK = True
except ImportError:
HAS_BOTTLENECK = False
__all__ = ['SigmaClip', 'sigma_clip', 'sigma_clipped_stats']
def _move_tuple_axes_first(array, axis):
"""
Bottleneck can only take integer axis, not tuple, so this function
takes all the axes to be operated on and combines them into the
first dimension of the array so that we can then use axis=0
"""
# Figure out how many axes we are operating over
naxis = len(axis)
# Add remaining axes to the axis tuple
axis += tuple(i for i in range(array.ndim) if i not in axis)
# The new position of each axis is just in order
destination = tuple(range(array.ndim))
# Reorder the array so that the axes being operated on are at the beginning
array_new = np.moveaxis(array, axis, destination)
# Figure out the size of the product of the dimensions being operated on
first = np.prod(array_new.shape[:naxis])
# Collapse the dimensions being operated on into a single dimension so that
# we can then use axis=0 with the bottleneck functions
array_new = array_new.reshape((first,) + array_new.shape[naxis:])
return array_new
def _nanmean(array, axis=None):
"""Bottleneck nanmean function that handle tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
return bottleneck.nanmean(array, axis=axis)
def _nanmedian(array, axis=None):
"""Bottleneck nanmedian function that handle tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
return bottleneck.nanmedian(array, axis=axis)
def _nanstd(array, axis=None, ddof=0):
"""Bottleneck nanstd function that handle tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
return bottleneck.nanstd(array, axis=axis, ddof=ddof)
class SigmaClip:
"""
Class to perform sigma clipping.
The data will be iterated over, each time rejecting values that lie more
than a specified number of standard deviations below or above a center
value.
Clipped (rejected) pixels are those where::
data < cenfunc(data [,axis=int]) - (sigma_lower * stdfunc(data [,axis=int]))
data > cenfunc(data [,axis=int]) + (sigma_upper * stdfunc(data [,axis=int]))
Invalid data values (i.e. NaN or inf) are automatically clipped.
For a functional interface to sigma clipping, see
:func:`sigma_clip`.
.. note::
`scipy.stats.sigmaclip
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_
provides a subset of the functionality in this class. Also, its
input data cannot be a masked array and it does not handle data
that contains invalid values (i.e. NaN or inf). Also note that
it uses the mean as the centering function.
If your data is a `~numpy.ndarray` with no invalid values and
you want to use the mean as the centering function with
``axis=None`` and iterate to convergence, then
`scipy.stats.sigmaclip` is ~25-30% faster than the equivalent
settings here (``s = SigmaClip(cenfunc='mean', maxiters=None);
s(data, axis=None)``).
Parameters
----------
sigma : float, optional
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is
3.
sigma_lower : float or `None`, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or `None`, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or `None`, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute the
center value for the clipping. If set to ``'median'`` or
``'mean'`` then having the optional `bottleneck`_ package
installed will result in the best performance. If using a
callable function/object and the ``axis`` keyword is used, then
it must be a callable that can ignore NaNs (e.g. `numpy.nanmean`)
and has an ``axis`` keyword to return an array with axis
dimension(s) removed. The default is ``'median'``.
.. _bottleneck: https://github.com/kwgoodman/bottleneck
stdfunc : {'std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If set to ``'std'``
then having the optional `bottleneck`_ package installed will
result in the best performance. If using a callable
function/object and the ``axis`` keyword is used, then it must
be a callable that can ignore NaNs (e.g. `numpy.nanstd`) and has
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
See Also
--------
sigma_clip, sigma_clipped_stats
Examples
--------
This example uses a data array of random variates from a Gaussian
distribution. We clip all points that are more than 2 sample
standard deviations from the median. The result is a masked array,
where the mask is `True` for clipped data::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> sigclip = SigmaClip(sigma=2, maxiters=5)
>>> filtered_data = sigclip(randvar)
This example clips all points that are more than 3 sigma relative to
the sample *mean*, clips until convergence, returns an unmasked
`~numpy.ndarray`, and modifies the data in-place::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> sigclip = SigmaClip(sigma=3, maxiters=None, cenfunc='mean')
>>> filtered_data = sigclip(randvar, masked=False, copy=False)
This example sigma clips along one axis::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> sigclip = SigmaClip(sigma=2.3)
>>> filtered_data = sigclip(data, axis=0)
Note that along the other axis, no points would be clipped, as the
standard deviation is higher.
"""
@deprecated_renamed_argument('iters', 'maxiters', '3.1')
def __init__(self, sigma=3., sigma_lower=None, sigma_upper=None,
maxiters=5, cenfunc='median', stdfunc='std'):
self.sigma = sigma
self.sigma_lower = sigma_lower or sigma
self.sigma_upper = sigma_upper or sigma
self.maxiters = maxiters or np.inf
self.cenfunc = self._parse_cenfunc(cenfunc)
self.stdfunc = self._parse_stdfunc(stdfunc)
def __repr__(self):
return ('SigmaClip(sigma={0}, sigma_lower={1}, sigma_upper={2}, '
'maxiters={3}, cenfunc={4}, stdfunc={5})'
.format(self.sigma, self.sigma_lower, self.sigma_upper,
self.maxiters, self.cenfunc, self.stdfunc))
def __str__(self):
lines = ['<' + self.__class__.__name__ + '>']
attrs = ['sigma', 'sigma_lower', 'sigma_upper', 'maxiters', 'cenfunc',
'stdfunc']
for attr in attrs:
lines.append(' {0}: {1}'.format(attr, getattr(self, attr)))
return '\n'.join(lines)
def _parse_cenfunc(self, cenfunc):
if isinstance(cenfunc, str):
if cenfunc == 'median':
if HAS_BOTTLENECK:
cenfunc = _nanmedian
else:
cenfunc = np.nanmedian # pragma: no cover
elif cenfunc == 'mean':
if HAS_BOTTLENECK:
cenfunc = _nanmean
else:
cenfunc = np.nanmean # pragma: no cover
else:
raise ValueError('{} is an invalid cenfunc.'.format(cenfunc))
return cenfunc
def _parse_stdfunc(self, stdfunc):
if isinstance(stdfunc, str):
if stdfunc != 'std':
raise ValueError('{} is an invalid stdfunc.'.format(stdfunc))
if HAS_BOTTLENECK:
stdfunc = _nanstd
else:
stdfunc = np.nanstd # pragma: no cover
return stdfunc
def _compute_bounds(self, data, axis=None):
# ignore RuntimeWarning if the array (or along an axis) has only
# NaNs
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
self._max_value = self.cenfunc(data, axis=axis)
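# Note: _max_value holds the center value at this point; it is
# turned into the upper bound in place below.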
std = self.stdfunc(data, axis=axis)
self._min_value = self._max_value - (std * self.sigma_lower)
self._max_value += std * self.sigma_upper
def _sigmaclip_noaxis(self, data, masked=True, return_bounds=False,
copy=True):
"""
Sigma clip the data when ``axis`` is None.
In this simple case, we remove clipped elements from the
flattened array during each iteration.
"""
filtered_data = data.ravel()
# remove masked values and convert to ndarray
if isinstance(filtered_data, np.ma.MaskedArray):
filtered_data = filtered_data.data[~filtered_data.mask]
# remove invalid values
good_mask = np.isfinite(filtered_data)
if np.any(~good_mask):
filtered_data = filtered_data[good_mask]
warnings.warn('Input data contains invalid values (NaNs or '
'infs), which were automatically clipped.',
AstropyUserWarning)
nchanged = 1
iteration = 0
while nchanged != 0 and (iteration < self.maxiters):
iteration += 1
size = filtered_data.size
self._compute_bounds(filtered_data, axis=None)
filtered_data = filtered_data[(filtered_data >= self._min_value) &
(filtered_data <= self._max_value)]
nchanged = size - filtered_data.size
self._niterations = iteration
if masked:
# return a masked array and optional bounds
filtered_data = np.ma.masked_invalid(data, copy=copy)
# update the mask in place, ignoring RuntimeWarnings for
# comparisons with NaN data values
with np.errstate(invalid='ignore'):
filtered_data.mask |= np.logical_or(data < self._min_value,
data > self._max_value)
if return_bounds:
return filtered_data, self._min_value, self._max_value
else:
return filtered_data
def _sigmaclip_withaxis(self, data, axis=None, masked=True,
return_bounds=False, copy=True):
"""
Sigma clip the data when ``axis`` is specified.
In this case, we replace clipped values with NaNs as placeholder
values.
"""
# float array type is needed to insert nans into the array
filtered_data = data.astype(float) # also makes a copy
# remove invalid values
bad_mask = ~np.isfinite(filtered_data)
if np.any(bad_mask):
filtered_data[bad_mask] = np.nan
warnings.warn('Input data contains invalid values (NaNs or '
'infs), which were automatically clipped.',
AstropyUserWarning)
# remove masked values and convert to plain ndarray
if isinstance(filtered_data, np.ma.MaskedArray):
filtered_data = np.ma.masked_invalid(filtered_data).astype(float)
filtered_data = filtered_data.filled(np.nan)
# convert negative axis/axes
if not isiterable(axis):
axis = (axis,)
axis = tuple(filtered_data.ndim + n if n < 0 else n for n in axis)
# define the shape of min/max arrays so that they can be broadcast
# with the data
mshape = tuple(1 if dim in axis else size
for dim, size in enumerate(filtered_data.shape))
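# For example, data of shape (5, 5) with axis=(0,) gives mshape == (1, 5),
# so the per-column bounds broadcast against the full data array.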
nchanged = 1
iteration = 0
while nchanged != 0 and (iteration < self.maxiters):
iteration += 1
n_nan = np.count_nonzero(np.isnan(filtered_data))
self._compute_bounds(filtered_data, axis=axis)
if not np.isscalar(self._min_value):
self._min_value = self._min_value.reshape(mshape)
self._max_value = self._max_value.reshape(mshape)
with np.errstate(invalid='ignore'):
filtered_data[(filtered_data < self._min_value) |
(filtered_data > self._max_value)] = np.nan
nchanged = n_nan - np.count_nonzero(np.isnan(filtered_data))
self._niterations = iteration
if masked:
# create an output masked array
if copy:
filtered_data = np.ma.masked_invalid(filtered_data)
else:
# ignore RuntimeWarnings for comparisons with NaN data values
with np.errstate(invalid='ignore'):
out = np.ma.masked_invalid(data, copy=False)
filtered_data = np.ma.masked_where(np.logical_or(
out < self._min_value, out > self._max_value),
out, copy=False)
if return_bounds:
return filtered_data, self._min_value, self._max_value
else:
return filtered_data
def __call__(self, data, axis=None, masked=True, return_bounds=False,
copy=True):
"""
Perform sigma clipping on the provided data.
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
The data to be sigma clipped.
axis : `None` or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed
to the ``cenfunc`` and ``stdfunc``. The default is `None`.
masked : bool, optional
If `True`, then a `~numpy.ma.MaskedArray` is returned, where
the mask is `True` for clipped values. If `False`, then a
`~numpy.ndarray` and the minimum and maximum clipping
thresholds are returned. The default is `True`.
return_bounds : bool, optional
If `True`, then the minimum and maximum clipping bounds are
also returned.
copy : bool, optional
If `True`, then the ``data`` array will be copied. If
`False` and ``masked=True``, then the returned masked array
data will contain the same array as the input ``data`` (if
``data`` is a `~numpy.ndarray` or `~numpy.ma.MaskedArray`).
The default is `True`.
Returns
-------
result : flexible
If ``masked=True``, then a `~numpy.ma.MaskedArray` is
returned, where the mask is `True` for clipped values. If
``masked=False``, then a `~numpy.ndarray` is returned.
If ``return_bounds=True``, then in addition to the (masked)
array above, the minimum and maximum clipping bounds are
returned.
If ``masked=False`` and ``axis=None``, then the output array
is a flattened 1D `~numpy.ndarray` where the clipped values
have been removed. If ``return_bounds=True`` then the
returned minimum and maximum thresholds are scalars.
If ``masked=False`` and ``axis`` is specified, then the
output `~numpy.ndarray` will have the same shape as the
input ``data`` and contain ``np.nan`` where values were
clipped. If ``return_bounds=True`` then the returned
minimum and maximum clipping thresholds will be
`~numpy.ndarray`\\s.
"""
data = np.asanyarray(data)
if data.size == 0:
return data
if isinstance(data, np.ma.MaskedArray) and data.mask.all():
return data
# These two cases are treated separately because when
# ``axis=None`` we can simply remove clipped values from the
# array. This is not possible when ``axis`` is specified, so
# instead we replace clipped values with NaNs as a placeholder
# value.
if axis is None:
return self._sigmaclip_noaxis(data, masked=masked,
return_bounds=return_bounds,
copy=copy)
else:
return self._sigmaclip_withaxis(data, axis=axis, masked=masked,
return_bounds=return_bounds,
copy=copy)
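# Example (illustrative sketch, not part of this module): using the
# object-oriented ``SigmaClip`` interface directly, mirroring the
# parameters documented above. The random data is arbitrary.
#
#     >>> import numpy as np
#     >>> from astropy.stats import SigmaClip
#     >>> data = np.random.randn(1000)
#     >>> sigclip = SigmaClip(sigma=2.5, maxiters=5)
#     >>> filtered = sigclip(data)  # masked array; mask is True for clipped values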
@deprecated_renamed_argument('iters', 'maxiters', '3.1')
def sigma_clip(data, sigma=3, sigma_lower=None, sigma_upper=None, maxiters=5,
cenfunc='median', stdfunc='std', axis=None, masked=True,
return_bounds=False, copy=True):
"""
Perform sigma-clipping on the provided data.
The data will be iterated over, each time rejecting values that are
less or more than a specified number of standard deviations from a
center value.
Clipped (rejected) pixels are those where::
data < cenfunc(data [,axis=int]) - (sigma_lower * stdfunc(data [,axis=int]))
data > cenfunc(data [,axis=int]) + (sigma_upper * stdfunc(data [,axis=int]))
Invalid data values (i.e. NaN or inf) are automatically clipped.
For an object-oriented interface to sigma clipping, see
:class:`SigmaClip`.
.. note::
`scipy.stats.sigmaclip
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_
        provides a subset of the functionality in this function. Also, its
input data cannot be a masked array and it does not handle data
that contains invalid values (i.e. NaN or inf). Also note that
it uses the mean as the centering function.
If your data is a `~numpy.ndarray` with no invalid values and
you want to use the mean as the centering function with
``axis=None`` and iterate to convergence, then
`scipy.stats.sigmaclip` is ~25-30% faster than the equivalent
settings here (``sigma_clip(data, cenfunc='mean', maxiters=None,
axis=None)``).
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
The data to be sigma clipped.
sigma : float, optional
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is
3.
sigma_lower : float or `None`, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or `None`, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or `None`, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute the
center value for the clipping. If set to ``'median'`` or
``'mean'`` then having the optional `bottleneck`_ package
installed will result in the best performance. If using a
callable function/object and the ``axis`` keyword is used, then
        it must be a callable that can ignore NaNs (e.g. `numpy.nanmean`)
and has an ``axis`` keyword to return an array with axis
dimension(s) removed. The default is ``'median'``.
.. _bottleneck: https://github.com/kwgoodman/bottleneck
stdfunc : {'std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If set to ``'std'``
then having the optional `bottleneck`_ package installed will
result in the best performance. If using a callable
function/object and the ``axis`` keyword is used, then it must
        be a callable that can ignore NaNs (e.g. `numpy.nanstd`) and has
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
axis : `None` or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
masked : bool, optional
If `True`, then a `~numpy.ma.MaskedArray` is returned, where the
mask is `True` for clipped values. If `False`, then a
`~numpy.ndarray` and the minimum and maximum clipping thresholds
are returned. The default is `True`.
return_bounds : bool, optional
If `True`, then the minimum and maximum clipping bounds are also
returned.
copy : bool, optional
If `True`, then the ``data`` array will be copied. If `False`
and ``masked=True``, then the returned masked array data will
contain the same array as the input ``data`` (if ``data`` is a
`~numpy.ndarray` or `~numpy.ma.MaskedArray`). The default is
`True`.
Returns
-------
result : flexible
If ``masked=True``, then a `~numpy.ma.MaskedArray` is returned,
where the mask is `True` for clipped values. If
``masked=False``, then a `~numpy.ndarray` is returned.
If ``return_bounds=True``, then in addition to the (masked)
array above, the minimum and maximum clipping bounds are
returned.
If ``masked=False`` and ``axis=None``, then the output array is
a flattened 1D `~numpy.ndarray` where the clipped values have
been removed. If ``return_bounds=True`` then the returned
minimum and maximum thresholds are scalars.
If ``masked=False`` and ``axis`` is specified, then the output
`~numpy.ndarray` will have the same shape as the input ``data``
and contain ``np.nan`` where values were clipped. If
``return_bounds=True`` then the returned minimum and maximum
        clipping thresholds will be `~numpy.ndarray`\\s.
See Also
--------
SigmaClip, sigma_clipped_stats
Examples
--------
This example uses a data array of random variates from a Gaussian
distribution. We clip all points that are more than 2 sample
standard deviations from the median. The result is a masked array,
where the mask is `True` for clipped data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=2, maxiters=5)
This example clips all points that are more than 3 sigma relative to
the sample *mean*, clips until convergence, returns an unmasked
`~numpy.ndarray`, and does not copy the data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=3, maxiters=None,
... cenfunc=mean, masked=False, copy=False)
This example sigma clips along one axis::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> filtered_data = sigma_clip(data, sigma=2.3, axis=0)
Note that along the other axis, no points would be clipped, as the
standard deviation is higher.
"""
sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower,
sigma_upper=sigma_upper, maxiters=maxiters,
cenfunc=cenfunc, stdfunc=stdfunc)
return sigclip(data, axis=axis, masked=masked,
return_bounds=return_bounds, copy=copy)
@deprecated_renamed_argument('iters', 'maxiters', '3.1')
def sigma_clipped_stats(data, mask=None, mask_value=None, sigma=3.0,
sigma_lower=None, sigma_upper=None, maxiters=5,
cenfunc='median', stdfunc='std', std_ddof=0,
axis=None):
"""
Calculate sigma-clipped statistics on the provided data.
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
Data array or object that can be converted to an array.
mask : `numpy.ndarray` (bool), optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are excluded when computing the statistics.
mask_value : float, optional
A data value (e.g., ``0.0``) that is ignored when computing the
statistics. ``mask_value`` will be masked in addition to any
input ``mask``.
sigma : float, optional
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is
3.
sigma_lower : float or `None`, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or `None`, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or `None`, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute the
center value for the clipping. If set to ``'median'`` or
``'mean'`` then having the optional `bottleneck`_ package
installed will result in the best performance. If using a
callable function/object and the ``axis`` keyword is used, then
        it must be a callable that can ignore NaNs (e.g. `numpy.nanmean`)
and has an ``axis`` keyword to return an array with axis
dimension(s) removed. The default is ``'median'``.
.. _bottleneck: https://github.com/kwgoodman/bottleneck
stdfunc : {'std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If set to ``'std'``
then having the optional `bottleneck`_ package installed will
result in the best performance. If using a callable
function/object and the ``axis`` keyword is used, then it must
        be a callable that can ignore NaNs (e.g. `numpy.nanstd`) and has
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
std_ddof : int, optional
The delta degrees of freedom for the standard deviation
calculation. The divisor used in the calculation is ``N -
std_ddof``, where ``N`` represents the number of elements. The
default is 0.
axis : `None` or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed
to the ``cenfunc`` and ``stdfunc``. The default is `None`.
Returns
-------
mean, median, stddev : float
The mean, median, and standard deviation of the sigma-clipped
data.
See Also
--------
SigmaClip, sigma_clip
"""
if mask is not None:
data = np.ma.MaskedArray(data, mask)
if mask_value is not None:
data = np.ma.masked_values(data, mask_value)
sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower,
sigma_upper=sigma_upper, maxiters=maxiters,
cenfunc=cenfunc, stdfunc=stdfunc)
data_clipped = sigclip(data, axis=axis, masked=False, return_bounds=False,
copy=False)
if HAS_BOTTLENECK:
mean = _nanmean(data_clipped, axis=axis)
median = _nanmedian(data_clipped, axis=axis)
std = _nanstd(data_clipped, ddof=std_ddof, axis=axis)
else: # pragma: no cover
mean = np.nanmean(data_clipped, axis=axis)
median = np.nanmedian(data_clipped, axis=axis)
std = np.nanstd(data_clipped, ddof=std_ddof, axis=axis)
return mean, median, std
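# Example (illustrative, not part of this module): sigma-clipped statistics
# on data containing gross outliers; the clipped mean/median/std should be
# close to the underlying Gaussian values.
#
#     >>> import numpy as np
#     >>> from astropy.stats import sigma_clipped_stats
#     >>> data = np.concatenate([np.random.normal(0., 1., 1000), [100., -100.]])
#     >>> mean, median, std = sigma_clipped_stats(data, sigma=3.0, maxiters=5)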
|
a790cbc00a7f459bea2769486b2eefac45b8172063c49775420494c05f5a6c90 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains statistical tools provided for or used by Astropy.
While the `scipy.stats` package contains a wide range of statistical
tools, it is a general-purpose package, and is missing some that are
particularly useful to astronomy or are used in an atypical way in
astronomy. This package is intended to provide such functionality, but
*not* to replace `scipy.stats` if its implementation satisfies
astronomers' needs.
"""
from .funcs import *
from .biweight import *
from .sigma_clipping import *
from .jackknife import *
from .circstats import *
from .bayesian_blocks import *
from .histogram import *
from .info_theory import *
from .lombscargle import *
from .spatial import *
from .bls import *
|
61c08e9e7074a435c3709fafe849ee05b9de48150f2d1b621a3ea47aea5272eb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple functions for model selection.
"""
import numpy as np
__all__ = ['bayesian_info_criterion', 'bayesian_info_criterion_lsq',
'akaike_info_criterion', 'akaike_info_criterion_lsq']
__doctest_requires__ = {'bayesian_info_criterion_lsq': ['scipy'],
'akaike_info_criterion_lsq': ['scipy']}
def bayesian_info_criterion(log_likelihood, n_params, n_samples):
r""" Computes the Bayesian Information Criterion (BIC) given the log of the
likelihood function evaluated at the estimated (or analytically derived)
parameters, the number of parameters, and the number of samples.
The BIC is usually applied to decide whether increasing the number of free
parameters (hence, increasing the model complexity) yields significantly
better fittings. The decision is in favor of the model with the lowest
BIC.
BIC is given as
.. math::
\mathrm{BIC} = k \ln(n) - 2L,
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
evaluated at the maximum likelihood estimate (i. e., the parameters for
which L is maximized).
    When comparing two models, define
:math:`\Delta \mathrm{BIC} = \mathrm{BIC}_h - \mathrm{BIC}_l`, in which
:math:`\mathrm{BIC}_h` is the higher BIC, and :math:`\mathrm{BIC}_l` is
    the lower BIC. The higher :math:`\Delta \mathrm{BIC}` is, the stronger
    the evidence against the model with the higher BIC.
The general rule of thumb is:
:math:`0 < \Delta\mathrm{BIC} \leq 2`: weak evidence that model low is
better
:math:`2 < \Delta\mathrm{BIC} \leq 6`: moderate evidence that model low is
better
:math:`6 < \Delta\mathrm{BIC} \leq 10`: strong evidence that model low is
better
:math:`\Delta\mathrm{BIC} > 10`: very strong evidence that model low is
better
For a detailed explanation, see [1]_ - [5]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Bayesian Information Criterion.
Examples
--------
The following example was originally presented in [1]_. Consider a
Gaussian model (mu, sigma) and a t-Student model (mu, sigma, delta).
In addition, assume that the t model has presented a higher likelihood.
The question that the BIC is proposed to answer is: "Is the increase in
    likelihood due to the larger number of parameters?"
>>> from astropy.stats.info_theory import bayesian_info_criterion
>>> lnL_g = -176.4
>>> lnL_t = -173.0
>>> n_params_g = 2
>>> n_params_t = 3
>>> n_samples = 100
>>> bic_g = bayesian_info_criterion(lnL_g, n_params_g, n_samples)
>>> bic_t = bayesian_info_criterion(lnL_t, n_params_t, n_samples)
>>> bic_g - bic_t # doctest: +FLOAT_CMP
2.1948298140119391
    Therefore, there is moderate evidence that the increase in likelihood
    for the t-Student model is due to the larger number of parameters.
References
----------
.. [1] Richards, D. Maximum Likelihood Estimation and the Bayesian
Information Criterion.
<https://hea-www.harvard.edu/astrostat/Stat310_0910/dr_20100323_mle.pdf>
.. [2] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [3] Origin Lab. Comparing Two Fitting Functions.
<http://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [4] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [5] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf>
"""
return n_params*np.log(n_samples) - 2.0*log_likelihood
def bayesian_info_criterion_lsq(ssr, n_params, n_samples):
r"""
Computes the Bayesian Information Criterion (BIC) assuming that the
observations come from a Gaussian distribution.
In this case, BIC is given as
.. math::
\mathrm{BIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + k\ln(n)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic. See [1]_ and [2]_.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Examples
--------
Consider the simple 1-D fitting example presented in the Astropy
modeling webpage [3]_. There, two models (Box and Gaussian) were fitted to
a source flux using the least squares statistic. However, the fittings
themselves do not tell much about which model better represents this
    hypothetical source. Therefore, we are going to apply the BIC in order to
decide in favor of a model.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import bayesian_info_criterion_lsq
>>> # Generate fake data
>>> np.random.seed(0)
>>> x = np.linspace(-5., 5., 200)
>>> y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
>>> y += np.random.normal(0., 0.2, x.shape)
>>> # Fit the data using a Box model.
>>> # Bounds are not really needed but included here to demonstrate usage.
>>> t_init = models.Trapezoid1D(amplitude=1., x_0=0., width=1., slope=0.5,
... bounds={"x_0": (-5., 5.)})
>>> fit_t = fitting.LevMarLSQFitter()
>>> t = fit_t(t_init, x, y)
>>> # Fit the data using a Gaussian
>>> g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
>>> fit_g = fitting.LevMarLSQFitter()
>>> g = fit_g(g_init, x, y)
    >>> # Compute the sums of squared residuals
>>> ssr_t = np.sum((t(x) - y)*(t(x) - y))
>>> ssr_g = np.sum((g(x) - y)*(g(x) - y))
    >>> # Compute the BICs
>>> bic_t = bayesian_info_criterion_lsq(ssr_t, 4, x.shape[0])
>>> bic_g = bayesian_info_criterion_lsq(ssr_g, 3, x.shape[0])
>>> bic_t - bic_g # doctest: +FLOAT_CMP
30.644474706065466
Hence, there is a very strong evidence that the Gaussian model has a
significantly better representation of the data than the Box model. This
is, obviously, expected since the true model is Gaussian.
References
----------
.. [1] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [2] Origin Lab. Comparing Two Fitting Functions.
<http://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [3] Astropy Models and Fitting
<http://docs.astropy.org/en/stable/modeling>
"""
return bayesian_info_criterion(-0.5 * n_samples * np.log(ssr / n_samples),
n_params, n_samples)
def akaike_info_criterion(log_likelihood, n_params, n_samples):
r"""
Computes the Akaike Information Criterion (AIC).
Like the Bayesian Information Criterion, the AIC is a measure of
relative fitting quality which is used for fitting evaluation and model
selection. The decision is in favor of the model with the lowest AIC.
AIC is given as
.. math::
\mathrm{AIC} = 2(k - L)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
evaluated at the maximum likelihood estimate (i. e., the parameters for
which L is maximized).
    In case the sample size is not "large enough", a correction is
applied, i.e.
.. math::
\mathrm{AIC} = 2(k - L) + \dfrac{2k(k+1)}{n - k - 1}
Rule of thumb [1]_:
:math:`\Delta\mathrm{AIC}_i = \mathrm{AIC}_i - \mathrm{AIC}_{min}`
:math:`\Delta\mathrm{AIC}_i < 2`: substantial support for model i
:math:`3 < \Delta\mathrm{AIC}_i < 7`: considerably less support for model i
    :math:`\Delta\mathrm{AIC}_i > 10`: essentially no support for model i
    in which :math:`\mathrm{AIC}_{min}` stands for the lowest AIC among the
    models being compared.
For detailed explanations see [1]_-[6]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
The following example was originally presented in [2]_. Basically, two
models are being compared. One with six parameters (model 1) and another
    with five parameters (model 2). Despite the fact that model 2 has a
lower AIC, we could decide in favor of model 1 since the difference (in
AIC) between them is only about 1.0.
>>> n_samples = 121
>>> lnL1 = -3.54
>>> n1_params = 6
>>> lnL2 = -4.17
>>> n2_params = 5
>>> aic1 = akaike_info_criterion(lnL1, n1_params, n_samples)
>>> aic2 = akaike_info_criterion(lnL2, n2_params, n_samples)
>>> aic1 - aic2 # doctest: +FLOAT_CMP
0.9551029748283746
    Therefore, model 1 can still be strongly supported, even though
    it has more free parameters.
References
----------
.. [1] Cavanaugh, J. E. Model Selection Lecture II: The Akaike
Information Criterion.
<http://machinelearning102.pbworks.com/w/file/fetch/47699383/ms_lec_2_ho.pdf>
.. [2] Mazerolle, M. J. Making sense out of Akaike's Information
Criterion (AIC): its use and interpretation in model selection and
inference from ecological data.
<http://theses.ulaval.ca/archimede/fichiers/21842/apa.html>
.. [3] Wikipedia. Akaike Information Criterion.
<https://en.wikipedia.org/wiki/Akaike_information_criterion>
.. [4] Origin Lab. Comparing Two Fitting Functions.
<http://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [5] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [6] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf>
"""
# Correction in case of small number of observations
if n_samples/float(n_params) >= 40.0:
aic = 2.0 * (n_params - log_likelihood)
else:
aic = (2.0 * (n_params - log_likelihood) +
2.0 * n_params * (n_params + 1.0) /
(n_samples - n_params - 1.0))
return aic
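# The Delta-AIC rule of thumb above can be made quantitative via Akaike
# weights, w_i = exp(-Delta_i/2) / sum_j exp(-Delta_j/2), which estimate
# the relative likelihood of each model. A minimal sketch (the helper
# below is hypothetical, not part of this module):
#
#     >>> import numpy as np
#     >>> def akaike_weights(aics):
#     ...     delta = np.asarray(aics) - np.min(aics)
#     ...     w = np.exp(-0.5 * delta)
#     ...     return w / w.sum()
#     >>> akaike_weights([100.0, 101.0, 110.0])  # doctest: +SKIP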
def akaike_info_criterion_lsq(ssr, n_params, n_samples):
r"""
Computes the Akaike Information Criterion assuming that the observations
are Gaussian distributed.
In this case, AIC is given as
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k
    In case the sample size is not "large enough", a correction is
applied, i.e.
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k +
\dfrac{2k(k+1)}{n-k-1}
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., the dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
    This example is based on the Astropy Modeling webpage, Compound models
section.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import akaike_info_criterion_lsq
>>> np.random.seed(42)
>>> # Generate fake data
>>> g1 = models.Gaussian1D(.1, 0, 0.2) # changed this to noise level
>>> g2 = models.Gaussian1D(.1, 0.3, 0.2) # and added another Gaussian
>>> g3 = models.Gaussian1D(2.5, 0.5, 0.1)
>>> x = np.linspace(-1, 1, 200)
>>> y = g1(x) + g2(x) + g3(x) + np.random.normal(0., 0.2, x.shape)
>>> # Fit with three Gaussians
>>> g3_init = (models.Gaussian1D(.1, 0, 0.1)
... + models.Gaussian1D(.1, 0.2, 0.15)
... + models.Gaussian1D(2., .4, 0.1))
>>> fitter = fitting.LevMarLSQFitter()
>>> g3_fit = fitter(g3_init, x, y)
>>> # Fit with two Gaussians
>>> g2_init = (models.Gaussian1D(.1, 0, 0.1) +
... models.Gaussian1D(2, 0.5, 0.1))
>>> g2_fit = fitter(g2_init, x, y)
>>> # Fit with only one Gaussian
>>> g1_init = models.Gaussian1D(amplitude=2., mean=0.3, stddev=.5)
>>> g1_fit = fitter(g1_init, x, y)
    >>> # Compute the sums of squared residuals
>>> ssr_g3 = np.sum((g3_fit(x) - y)**2.0)
>>> ssr_g2 = np.sum((g2_fit(x) - y)**2.0)
>>> ssr_g1 = np.sum((g1_fit(x) - y)**2.0)
>>> akaike_info_criterion_lsq(ssr_g3, 9, x.shape[0]) # doctest: +FLOAT_CMP
-660.41075962620482
>>> akaike_info_criterion_lsq(ssr_g2, 6, x.shape[0]) # doctest: +FLOAT_CMP
-662.83834510232043
>>> akaike_info_criterion_lsq(ssr_g1, 3, x.shape[0]) # doctest: +FLOAT_CMP
-647.47312032659499
Hence, from the AIC values, we would prefer to choose the model g2_fit.
    However, the model g3_fit still has considerable support, since the
    difference in AIC is only about 2.4. The model g1_fit should be rejected.
References
----------
.. [1] Akaike Information Criteria
<http://avesbiodiv.mncn.csic.es/estadistica/ejemploaic.pdf>
.. [2] Hu, S. Akaike Information Criterion.
<http://www4.ncsu.edu/~shu3/Presentation/AIC.pdf>
.. [3] Origin Lab. Comparing Two Fitting Functions.
<http://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
"""
return akaike_info_criterion(-0.5 * n_samples * np.log(ssr / n_samples),
n_params, n_samples)
|
f27688c5ec5b240fbf14a5eedc6290f96fb12e639c87a61a162b8bf5a008999c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Methods for selecting the bin width of histograms
Ported from the astroML project: http://astroML.org/
"""
import numpy as np
from . import bayesian_blocks
__all__ = ['histogram', 'scott_bin_width', 'freedman_bin_width',
'knuth_bin_width']
def histogram(a, bins=10, range=None, weights=None, **kwargs):
"""Enhanced histogram function, providing adaptive binnings
This is a histogram function that enables the use of more sophisticated
algorithms for determining bins. Aside from the ``bins`` argument allowing
a string specified how bins are computed, the parameters are the same
as ``numpy.histogram()``.
Parameters
----------
a : array_like
array of data to be histogrammed
bins : int or list or str (optional)
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
range : tuple or None (optional)
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
weights : array_like, optional
Not Implemented
other keyword arguments are described in numpy.histogram().
Returns
-------
hist : array
The values of the histogram. See ``density`` and ``weights`` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
numpy.histogram
"""
# if bins is a string, first compute bin edges with the desired heuristic
if isinstance(bins, str):
a = np.asarray(a).ravel()
# TODO: if weights is specified, we need to modify things.
# e.g. we could use point measures fitness for Bayesian blocks
if weights is not None:
raise NotImplementedError("weights are not yet supported "
"for the enhanced histogram")
# if range is specified, we need to truncate the data for
# the bin-finding routines
if range is not None:
a = a[(a >= range[0]) & (a <= range[1])]
if bins == 'blocks':
bins = bayesian_blocks(a)
elif bins == 'knuth':
da, bins = knuth_bin_width(a, True)
elif bins == 'scott':
da, bins = scott_bin_width(a, True)
elif bins == 'freedman':
da, bins = freedman_bin_width(a, True)
else:
raise ValueError("unrecognized bin code: '{}'".format(bins))
# Now we call numpy's histogram with the resulting bin edges
return np.histogram(a, bins=bins, range=range, weights=weights, **kwargs)
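# Example (illustrative, not part of this module): using an adaptive bin
# rule instead of a fixed bin count.
#
#     >>> import numpy as np
#     >>> x = np.random.normal(size=1000)
#     >>> counts, edges = histogram(x, bins='freedman')
#     >>> counts.sum() == x.size
#     True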
def scott_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using Scott's rule
Scott's rule is a normal reference rule: it minimizes the integrated
mean squared error in the bin approximation under the assumption that the
data is approximately Gaussian.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using Scott's rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{3.5\sigma}{n^{1/3}}
where :math:`\sigma` is the standard deviation of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] Scott, David W. (1979). "On optimal and data-based histograms".
           Biometrika 66 (3): 605-610
See Also
--------
knuth_bin_width
freedman_bin_width
bayesian_blocks
histogram
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
sigma = np.std(data)
dx = 3.5 * sigma / (n ** (1 / 3))
if return_bins:
Nbins = np.ceil((data.max() - data.min()) / dx)
Nbins = max(1, Nbins)
bins = data.min() + dx * np.arange(Nbins + 1)
return dx, bins
else:
return dx
def freedman_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using the Freedman-Diaconis rule
The Freedman-Diaconis rule is a normal reference rule like Scott's
rule, but uses rank-based statistics for results which are more robust
to deviations from a normal distribution.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using the Freedman-Diaconis rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{2(q_{75} - q_{25})}{n^{1/3}}
    where :math:`q_{N}` is the :math:`N`-th percentile of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] D. Freedman & P. Diaconis (1981)
"On the histogram as a density estimator: L2 theory".
Probability Theory and Related Fields 57 (4): 453-476
See Also
--------
knuth_bin_width
scott_bin_width
bayesian_blocks
histogram
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
if n < 4:
raise ValueError("data should have more than three entries")
v25, v75 = np.percentile(data, [25, 75])
dx = 2 * (v75 - v25) / (n ** (1 / 3))
if return_bins:
dmin, dmax = data.min(), data.max()
Nbins = max(1, np.ceil((dmax - dmin) / dx))
try:
bins = dmin + dx * np.arange(Nbins + 1)
except ValueError as e:
if 'Maximum allowed size exceeded' in str(e):
raise ValueError(
'The inter-quartile range of the data is too small: '
'failed to construct histogram with {} bins. '
'Please use another bin method, such as '
'bins="scott"'.format(Nbins + 1))
else: # Something else # pragma: no cover
raise
return dx, bins
else:
return dx
def knuth_bin_width(data, return_bins=False, quiet=True):
r"""Return the optimal histogram bin width using Knuth's rule.
Knuth's rule is a fixed-width, Bayesian approach to determining
the optimal bin width of a histogram.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
quiet : bool (optional)
if True (default) then suppress stdout output from scipy.optimize
Returns
-------
dx : float
optimal bin width. Bins are measured starting at the first data point.
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal number of bins is the value M which maximizes the function
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`
[1]_.
References
----------
.. [1] Knuth, K.H. "Optimal Data-Based Binning for Histograms".
arXiv:0605197, 2006
See Also
--------
freedman_bin_width
scott_bin_width
bayesian_blocks
histogram
"""
# import here because of optional scipy dependency
from scipy import optimize
knuthF = _KnuthF(data)
dx0, bins0 = freedman_bin_width(data, True)
M = optimize.fmin(knuthF, len(bins0), disp=not quiet)[0]
bins = knuthF.bins(M)
dx = bins[1] - bins[0]
if return_bins:
return dx, bins
else:
return dx
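# Example (illustrative, not part of this module): comparing the three
# fixed-width rules on the same data. ``knuth_bin_width`` requires the
# optional scipy dependency.
#
#     >>> import numpy as np
#     >>> x = np.random.normal(size=10000)
#     >>> scott_bin_width(x)       # doctest: +SKIP
#     >>> freedman_bin_width(x)    # doctest: +SKIP
#     >>> knuth_bin_width(x)       # doctest: +SKIP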
class _KnuthF:
r"""Class which implements the function minimized by knuth_bin_width
Parameters
----------
data : array-like, one dimension
data to be histogrammed
Notes
-----
the function F is given by
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`.
See Also
--------
knuth_bin_width
"""
def __init__(self, data):
self.data = np.array(data, copy=True)
if self.data.ndim != 1:
raise ValueError("data should be 1-dimensional")
self.data.sort()
self.n = self.data.size
# import here rather than globally: scipy is an optional dependency.
# Note that scipy is imported in the function which calls this,
# so there shouldn't be any issue importing here.
from scipy import special
# create a reference to gammaln to use in self.eval()
self.gammaln = special.gammaln
def bins(self, M):
"""Return the bin edges given a width dx"""
return np.linspace(self.data[0], self.data[-1], int(M) + 1)
def __call__(self, M):
return self.eval(M)
def eval(self, M):
"""Evaluate the Knuth function
Parameters
----------
        M : int
            Number of bins
Returns
-------
F : float
evaluation of the negative Knuth likelihood function:
smaller values indicate a better fit.
"""
M = int(M)
if M <= 0:
return np.inf
bins = self.bins(M)
nk, bins = np.histogram(self.data, bins)
return -(self.n * np.log(M) +
self.gammaln(0.5 * M) -
M * self.gammaln(0.5) -
self.gammaln(self.n + 0.5 * M) +
np.sum(self.gammaln(nk + 0.5)))
|
5bc8651df0a7400e02ac901037b7f450e55c8057f1178e8f94188cf78a658962 | """
Table property for providing information about table.
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import os
from contextlib import contextmanager
from inspect import isclass
import numpy as np
from ..utils.data_info import DataInfo
__all__ = ['table_info', 'TableInfo', 'serialize_method_as']
def table_info(tbl, option='attributes', out=''):
"""
    Write summary information about the table to the ``out`` filehandle.
By default this prints to standard output via sys.stdout.
The ``option`` argument specifies what type of information
to include. This can be a string, a function, or a list of
strings or functions. Built-in options are:
- ``attributes``: basic column meta data like ``dtype`` or ``format``
- ``stats``: basic statistics: minimum, mean, and maximum
If a function is specified then that function will be called with the
column as its single argument. The function must return an OrderedDict
containing the information attributes.
If a list is provided then the information attributes will be
appended for each of the options, in order.
Examples
--------
>>> from astropy.table.table_helpers import simple_table
>>> t = simple_table(size=2, kinds='if')
>>> t['a'].unit = 'm'
>>> t.info()
<Table length=2>
name dtype unit
---- ------- ----
a int64 m
b float64
>>> t.info('stats')
<Table length=2>
name mean std min max
---- ---- --- --- ---
a 1.5 0.5 1 2
b 1.5 0.5 1.0 2.0
Parameters
----------
option : str, function, list of (str or function)
Info option, defaults to 'attributes'.
out : file-like object, None
Output destination, default is sys.stdout. If None then a
Table with information attributes is returned
Returns
-------
info : `~astropy.table.Table` if out==None else None
"""
from .table import Table
if out == '':
out = sys.stdout
descr_vals = [tbl.__class__.__name__]
if tbl.masked:
descr_vals.append('masked=True')
descr_vals.append('length={0}'.format(len(tbl)))
outlines = ['<' + ' '.join(descr_vals) + '>']
cols = tbl.columns.values()
if tbl.colnames:
infos = []
for col in cols:
infos.append(col.info(option, out=None))
info = Table(infos, names=list(infos[0]))
else:
info = Table()
if out is None:
return info
# Since info is going to a filehandle for viewing then remove uninteresting
# columns.
if 'class' in info.colnames:
# Remove 'class' info column if all table columns are the same class
# and they are the default column class for that table.
uniq_types = set(type(col) for col in cols)
if len(uniq_types) == 1 and isinstance(cols[0], tbl.ColumnClass):
del info['class']
if 'n_bad' in info.colnames and np.all(info['n_bad'] == 0):
del info['n_bad']
# Standard attributes has 'length' but this is typically redundant
if 'length' in info.colnames and np.all(info['length'] == len(tbl)):
del info['length']
for name in info.colnames:
if info[name].dtype.kind in 'SU' and np.all(info[name] == ''):
del info[name]
if tbl.colnames:
outlines.extend(info.pformat(max_width=-1, max_lines=-1, show_unit=False))
else:
outlines.append('<No columns>')
out.writelines(outline + os.linesep for outline in outlines)
class TableInfo(DataInfo):
_parent = None
def __call__(self, option='attributes', out=''):
return table_info(self._parent, option, out)
__call__.__doc__ = table_info.__doc__
@contextmanager
def serialize_method_as(tbl, serialize_method):
"""Context manager to temporarily override individual
column info.serialize_method dict values. The serialize_method
attribute is an optional dict which might look like ``{'fits':
'jd1_jd2', 'ecsv': 'formatted_value', ..}``.
    ``serialize_method`` is a str or dict. If str then it is the
    ``serialize_method`` that will be used for all formats.
If dict then the key values can be either:
- Column name. This has higher precedence than the second option of
matching class.
- Class (matches any column which is an instance of the class)
This context manager is expected to be used only within ``Table.write``.
    It could have been a private method on Table but we prefer not to add
clutter to that class.
Parameters
----------
tbl : Table object
Input table
serialize_method : dict, str
Dict with key values of column names or types, or str
Returns
-------
None (context manager)
"""
def get_override_sm(col):
"""
Determine if the ``serialize_method`` str or dict specifies an
override of column presets for ``col``. Returns the matching
serialize_method value or ``None``.
"""
# If a string then all columns match
if isinstance(serialize_method, str):
return serialize_method
# If column name then return that serialize_method
if col.info.name in serialize_method:
return serialize_method[col.info.name]
# Otherwise look for subclass matches
for key in serialize_method:
if isclass(key) and isinstance(col, key):
return serialize_method[key]
return None
# Setup for the context block. Set individual column.info.serialize_method
# values as appropriate and keep a backup copy. If ``serialize_method``
# is None or empty then don't do anything.
if serialize_method:
# Original serialize_method dict, keyed by column name. This only
# gets set if there is an override.
original_sms = {}
# Go through every column and if it has a serialize_method info
# attribute then potentially update it for the duration of the write.
for col in tbl.itercols():
if hasattr(col.info, 'serialize_method'):
override_sm = get_override_sm(col)
if override_sm:
# Make a reference copy of the column serialize_method
# dict which maps format (e.g. 'fits') to the
# appropriate method (e.g. 'data_mask').
original_sms[col.info.name] = col.info.serialize_method
# Set serialize method for *every* available format. This is
# brute force, but at this point the format ('fits', 'ecsv', etc)
# is not actually known (this gets determined by the write function
# in registry.py). Note this creates a new temporary dict object
# so that the restored version is the same original object.
col.info.serialize_method = {fmt: override_sm
for fmt in col.info.serialize_method}
# Finally yield for the context block
try:
yield
finally:
# Teardown (restore) for the context block. Be sure to do this even
# if an exception occurred.
if serialize_method:
for name, original_sm in original_sms.items():
tbl[name].info.serialize_method = original_sm
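# Example (illustrative sketch, not part of this module): forcing every
# column to serialize as formatted values for the duration of a write.
# Assumes a table ``t`` whose columns define an ``info.serialize_method``
# dict (e.g. Time columns).
#
#     >>> with serialize_method_as(t, 'formatted_value'):  # doctest: +SKIP
#     ...     t.write('out.ecsv', format='ascii.ecsv')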
|
40566103466e98c75986a99ed7eff9590a9aca80557060013356dbe2ecd5bdc7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The Index class can use several implementations as its
engine. Any implementation should implement the following:
__init__(data, row_index) : initialize index based on key/row list pairs
add(key, row) -> None : add (key, row) to existing data
remove(key, data=None) -> boolean : remove data from self[key], or all of
self[key] if data is None
shift_left(row) -> None : decrement row numbers after row
shift_right(row) -> None : increase row numbers >= row
find(key) -> list : list of rows corresponding to key
range(lower, upper, bounds) -> list : rows in self[k] where k is between
lower and upper (<= or < based on bounds)
sort() -> None : make row order align with key order
sorted_data() -> list of rows in sorted order (by key)
replace_rows(row_map) -> None : replace row numbers based on slice
items() -> list of tuples of the form (key, data)
Notes
-----
When a Table is initialized from another Table, indices are
(deep) copied and their columns are set to the columns of the new Table.
Column creation:
Column(c) -> deep copy of indices
c[[1, 2]] -> deep copy and reordering of indices
c[1:2] -> reference
array.view(Column) -> no indices
"""
from copy import deepcopy
import numpy as np
from .bst import MinValue, MaxValue
from .sorted_array import SortedArray
from ..time import Time
class QueryError(ValueError):
'''
Indicates that a given index cannot handle the supplied query.
'''
pass
class Index:
'''
The Index class makes it possible to maintain indices
on columns of a Table, so that column values can be queried
quickly and efficiently. Column values are stored in lexicographic
sorted order, which allows for binary searching in O(log n).
Parameters
----------
columns : list or None
List of columns on which to create an index. If None,
create an empty index for purposes of deep copying.
engine : type, instance, or None
Indexing engine class to use (from among SortedArray, BST,
FastBST, FastRBT, and SCEngine) or actual engine instance.
If the supplied argument is None (by default), use SortedArray.
unique : bool (defaults to False)
Whether the values of the index must be unique
'''
def __new__(cls, *args, **kwargs):
self = super().__new__(cls)
# If (and only if) unpickling for protocol >= 2, then args and kwargs
# are both empty. The class __init__ requires at least the `columns`
# arg. In this case return a bare `Index` object which is then morphed
# by the unpickling magic into the correct SlicedIndex object.
if not args and not kwargs:
return self
self.__init__(*args, **kwargs)
return SlicedIndex(self, slice(0, 0, None), original=True)
def __init__(self, columns, engine=None, unique=False):
from .table import Table, Column
if engine is not None and not isinstance(engine, type):
# create from data
self.engine = engine.__class__
self.data = engine
self.columns = columns
return
# by default, use SortedArray
self.engine = engine or SortedArray
if columns is None: # this creates a special exception for deep copying
columns = []
data = []
row_index = []
elif len(columns) == 0:
raise ValueError("Cannot create index without at least one column")
elif len(columns) == 1:
col = columns[0]
row_index = Column(col.argsort())
data = Table([col[row_index]])
else:
num_rows = len(columns[0])
# replace Time columns with approximate form and remainder
new_columns = []
for col in columns:
if isinstance(col, Time):
new_columns.append(col.jd)
remainder = col - col.__class__(col.jd, format='jd')
new_columns.append(remainder.jd)
else:
new_columns.append(col)
# sort the table lexicographically and keep row numbers
table = Table(columns + [np.arange(num_rows)], copy_indices=False)
sort_columns = new_columns[::-1]
try:
lines = table[np.lexsort(sort_columns)]
except TypeError: # arbitrary mixins might not work with lexsort
lines = table[table.argsort()]
data = lines[lines.colnames[:-1]]
row_index = lines[lines.colnames[-1]]
self.data = self.engine(data, row_index, unique=unique)
self.columns = columns
def __len__(self):
'''
Number of rows in index.
'''
return len(self.columns[0])
def replace_col(self, prev_col, new_col):
'''
Replace an indexed column with an updated reference.
Parameters
----------
prev_col : Column
Column reference to replace
new_col : Column
New column reference
'''
self.columns[self.col_position(prev_col.info.name)] = new_col
def reload(self):
'''
Recreate the index based on data in self.columns.
'''
self.__init__(self.columns, engine=self.engine)
def col_position(self, col_name):
'''
Return the position of col_name in self.columns.
Parameters
----------
col_name : str
Name of column to look up
'''
for i, c in enumerate(self.columns):
if c.info.name == col_name:
return i
raise ValueError("Column does not belong to index: {0}".format(col_name))
def insert_row(self, pos, vals, columns):
'''
Insert a new row from the given values.
Parameters
----------
pos : int
Position at which to insert row
vals : list or tuple
List of values to insert into a new row
columns : list
Table column references
'''
key = [None] * len(self.columns)
for i, col in enumerate(columns):
try:
key[i] = vals[self.col_position(col.info.name)]
except ValueError: # not a member of index
continue
num_rows = len(self.columns[0])
if pos < num_rows:
# shift all rows >= pos to the right
self.data.shift_right(pos)
self.data.add(tuple(key), pos)
def get_row_specifier(self, row_specifier):
'''
Return an iterable corresponding to the
input row specifier.
Parameters
----------
row_specifier : int, list, ndarray, or slice
'''
if isinstance(row_specifier, (int, np.integer)):
# single row
return (row_specifier,)
elif isinstance(row_specifier, (list, np.ndarray)):
return row_specifier
elif isinstance(row_specifier, slice):
col_len = len(self.columns[0])
return range(*row_specifier.indices(col_len))
raise ValueError("Expected int, array of ints, or slice but "
"got {0} in remove_rows".format(row_specifier))
def remove_rows(self, row_specifier):
'''
Remove the given rows from the index.
Parameters
----------
row_specifier : int, list, ndarray, or slice
Indicates which row(s) to remove
'''
rows = []
# To maintain the correct row order, we loop twice,
# deleting rows first and then reordering the remaining rows
for row in self.get_row_specifier(row_specifier):
self.remove_row(row, reorder=False)
rows.append(row)
# second pass - row order is reversed to maintain
# correct row numbers
for row in reversed(sorted(rows)):
self.data.shift_left(row)
def remove_row(self, row, reorder=True):
'''
Remove the given row from the index.
Parameters
----------
row : int
Position of row to remove
reorder : bool
Whether to reorder indices after removal
'''
# for removal, form a key consisting of column values in this row
if not self.data.remove(tuple([col[row] for col in self.columns]), row):
raise ValueError("Could not remove row {0} from index".format(row))
# decrement the row number of all later rows
if reorder:
self.data.shift_left(row)
def find(self, key):
'''
Return the row values corresponding to key, in sorted order.
Parameters
----------
key : tuple
Values to search for in each column
'''
return self.data.find(key)
def same_prefix(self, key):
'''
Return rows whose keys contain the supplied key as a prefix.
Parameters
----------
key : tuple
Prefix for which to search
'''
return self.same_prefix_range(key, key, (True, True))
def same_prefix_range(self, lower, upper, bounds=(True, True)):
'''
Return rows whose keys have a prefix in the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
n = len(lower)
ncols = len(self.columns)
a = MinValue() if bounds[0] else MaxValue()
b = MaxValue() if bounds[1] else MinValue()
# [x, y] search corresponds to [(x, min), (y, max)]
        # (x, y) search corresponds to ((x, max), (y, min))
lower = lower + tuple((ncols - n) * [a])
upper = upper + tuple((ncols - n) * [b])
return self.data.range(lower, upper, bounds)
def range(self, lower, upper, bounds=(True, True)):
'''
Return rows within the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
return self.data.range(lower, upper, bounds)
def replace(self, row, col_name, val):
'''
Replace the value of a column at a given position.
Parameters
----------
row : int
Row number to modify
col_name : str
Name of the Column to modify
val : col.info.dtype
Value to insert at specified row of col
'''
self.remove_row(row, reorder=False)
key = [c[row] for c in self.columns]
key[self.col_position(col_name)] = val
self.data.add(tuple(key), row)
def replace_rows(self, col_slice):
'''
Modify rows in this index to agree with the specified
slice. For example, given an index
{'5': 1, '2': 0, '3': 2} on a column ['2', '5', '3'],
an input col_slice of [2, 0] will result in the relabeling
{'3': 0, '2': 1} on the sliced column ['3', '2'].
Parameters
----------
col_slice : list
Indices to slice
'''
row_map = dict((row, i) for i, row in enumerate(col_slice))
self.data.replace_rows(row_map)
def sort(self):
'''
Make row numbers follow the same sort order as the keys
of the index.
'''
self.data.sort()
def sorted_data(self):
'''
Returns a list of rows in sorted order based on keys;
essentially acts as an argsort() on columns.
'''
return self.data.sorted_data()
def __getitem__(self, item):
'''
Returns a sliced version of this index.
Parameters
----------
item : slice
Input slice
Returns
-------
SlicedIndex
A sliced reference to this index.
'''
return SlicedIndex(self, item)
def __str__(self):
return str(self.data)
def __repr__(self):
return str(self)
def __deepcopy__(self, memo):
'''
Return a deep copy of this index.
Notes
-----
The default deep copy must be overridden to perform
a shallow copy of the index columns, avoiding infinite recursion.
Parameters
----------
memo : dict
'''
# Bypass Index.__new__ to create an actual Index, not a SlicedIndex.
index = super().__new__(self.__class__)
index.__init__(None, engine=self.engine)
index.data = deepcopy(self.data, memo)
index.columns = self.columns[:] # new list, same columns
memo[id(self)] = index
return index
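# Example (illustrative, not part of this module): indices are normally
# created through the Table API rather than by instantiating Index
# directly.
#
#     >>> from astropy.table import Table
#     >>> t = Table({'a': [3, 1, 2], 'b': ['x', 'y', 'z']})
#     >>> t.add_index('a')
#     >>> t.loc[2]['b']   # indexed retrieval by key value
#     'z'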
class SlicedIndex:
'''
This class provides a wrapper around an actual Index object
to make index slicing function correctly. Since numpy expects
array slices to provide an actual data view, a SlicedIndex should
retrieve data directly from the original index and then adapt
it to the sliced coordinate system as appropriate.
Parameters
----------
index : Index
The original Index reference
index_slice : slice
The slice to which this SlicedIndex corresponds
original : bool
Whether this SlicedIndex represents the original index itself.
For the most part this is similar to index[:] but certain
copying operations are avoided, and the slice retains the
length of the actual index despite modification.
'''
def __init__(self, index, index_slice, original=False):
self.index = index
self.original = original
self._frozen = False
if isinstance(index_slice, tuple):
self.start, self._stop, self.step = index_slice
else: # index_slice is an actual slice
num_rows = len(index.columns[0])
self.start, self._stop, self.step = index_slice.indices(num_rows)
@property
def length(self):
return 1 + (self.stop - self.start - 1) // self.step
@property
def stop(self):
'''
The stopping position of the slice, or the end of the
index if this is an original slice.
'''
return len(self.index) if self.original else self._stop
def __getitem__(self, item):
'''
Returns another slice of this Index slice.
Parameters
----------
item : slice
Index slice
'''
if self.length <= 0:
# empty slice
return SlicedIndex(self.index, slice(1, 0))
start, stop, step = item.indices(self.length)
new_start = self.orig_coords(start)
new_stop = self.orig_coords(stop)
new_step = self.step * step
return SlicedIndex(self.index, (new_start, new_stop, new_step))
def sliced_coords(self, rows):
'''
Convert the input rows to the sliced coordinate system.
Parameters
----------
rows : list
Rows in the original coordinate system
Returns
-------
sliced_rows : list
Rows in the sliced coordinate system
'''
if self.original:
return rows
else:
rows = np.array(rows)
row0 = rows - self.start
if self.step != 1:
correct_mod = np.mod(row0, self.step) == 0
row0 = row0[correct_mod]
if self.step > 0:
ok = (row0 >= 0) & (row0 < self.stop - self.start)
else:
ok = (row0 <= 0) & (row0 > self.stop - self.start)
return row0[ok] // self.step
def orig_coords(self, row):
'''
Convert the input row from sliced coordinates back
to original coordinates.
Parameters
----------
row : int
Row in the sliced coordinate system
Returns
-------
orig_row : int
Row in the original coordinate system
'''
return row if self.original else self.start + row * self.step
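    # Worked example of the coordinate arithmetic used by sliced_coords()
    # (illustrative, plain numpy): with start=2 and step=3, original rows
    # 2 and 5 map to sliced rows 0 and 1, while row 4 falls off the stride.
    #
    #     >>> import numpy as np
    #     >>> start, step = 2, 3
    #     >>> row0 = np.array([2, 4, 5]) - start
    #     >>> row0[np.mod(row0, step) == 0] // step
    #     array([0, 1])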
def find(self, key):
return self.sliced_coords(self.index.find(key))
def where(self, col_map):
return self.sliced_coords(self.index.where(col_map))
def range(self, lower, upper):
return self.sliced_coords(self.index.range(lower, upper))
def same_prefix(self, key):
return self.sliced_coords(self.index.same_prefix(key))
def sorted_data(self):
return self.sliced_coords(self.index.sorted_data())
def replace(self, row, col, val):
if not self._frozen:
self.index.replace(self.orig_coords(row), col, val)
def copy(self):
if not self.original:
# replace self.index with a new object reference
self.index = deepcopy(self.index)
return self.index
def insert_row(self, pos, vals, columns):
if not self._frozen:
self.copy().insert_row(self.orig_coords(pos), vals,
columns)
def get_row_specifier(self, row_specifier):
return [self.orig_coords(x) for x in
self.index.get_row_specifier(row_specifier)]
def remove_rows(self, row_specifier):
if not self._frozen:
self.copy().remove_rows(row_specifier)
def replace_rows(self, col_slice):
if not self._frozen:
self.index.replace_rows([self.orig_coords(x) for x in col_slice])
def sort(self):
if not self._frozen:
self.copy().sort()
def __repr__(self):
if self.original:
return repr(self.index)
return 'Index slice {0} of\n{1}'.format(
(self.start, self.stop, self.step), self.index)
def __str__(self):
return repr(self)
def replace_col(self, prev_col, new_col):
self.index.replace_col(prev_col, new_col)
def reload(self):
self.index.reload()
def col_position(self, col_name):
return self.index.col_position(col_name)
def get_slice(self, col_slice, item):
'''
Return a newly created index from the given slice.
Parameters
----------
col_slice : Column object
Already existing slice of a single column
item : list or ndarray
Slice for retrieval
'''
from .table import Table
if len(self.columns) == 1:
return Index([col_slice], engine=self.data.__class__)
t = Table(self.columns, copy_indices=False)
with t.index_mode('discard_on_copy'):
new_cols = t[item].columns.values()
return Index(new_cols, engine=self.data.__class__)
@property
def columns(self):
return self.index.columns
@property
def data(self):
return self.index.data
def get_index(table, table_copy):
'''
Inputs a table and some subset of its columns, and
returns an index corresponding to this subset or None
if no such index exists.
Parameters
----------
table : `Table`
Input table
table_copy : `Table`
Subset of the columns in the table argument
'''
cols = set(table_copy.columns)
for column in cols:
for index in table[column].info.indices:
if set([x.info.name for x in index.columns]) == cols:
return index
return None
def get_index_by_names(table, names):
'''
Returns an index in ``table`` corresponding to the ``names`` columns or None
if no such index exists.
Parameters
----------
table : `Table`
Input table
    names : tuple, list
Column names
'''
names = list(names)
for index in table.indices:
index_names = [col.info.name for col in index.columns]
if index_names == names:
return index
else:
return None
class _IndexModeContext:
'''
A context manager that allows for special indexing modes, which
are intended to improve performance. Currently the allowed modes
are "freeze", in which indices are not modified upon column modification,
"copy_on_getitem", in which indices are copied upon column slicing,
and "discard_on_copy", in which indices are discarded upon table
copying/slicing.
'''
_col_subclasses = {}
def __init__(self, table, mode):
'''
Parameters
----------
table : Table
The table to which the mode should be applied
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications on an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
self.table = table
self.mode = mode
# Used by copy_on_getitem
self._orig_classes = []
if mode not in ('freeze', 'discard_on_copy', 'copy_on_getitem'):
raise ValueError("Expected a mode of either 'freeze', "
"'discard_on_copy', or 'copy_on_getitem', got "
"'{0}'".format(mode))
def __enter__(self):
if self.mode == 'discard_on_copy':
self.table._copy_indices = False
elif self.mode == 'copy_on_getitem':
for col in self.table.columns.values():
self._orig_classes.append(col.__class__)
col.__class__ = self._get_copy_on_getitem_shim(col.__class__)
else:
for index in self.table.indices:
index._frozen = True
def __exit__(self, exc_type, exc_value, traceback):
if self.mode == 'discard_on_copy':
self.table._copy_indices = True
elif self.mode == 'copy_on_getitem':
for col in reversed(self.table.columns.values()):
col.__class__ = self._orig_classes.pop()
else:
for index in self.table.indices:
index._frozen = False
index.reload()
def _get_copy_on_getitem_shim(self, cls):
"""
This creates a subclass of the column's class which overrides that
class's ``__getitem__``, such that when returning a slice of the
column, the relevant indices are also copied over to the slice.
Ideally, rather than shimming in a new ``__class__`` we would be able
to just flip a flag that is checked by the base class's
``__getitem__``. Unfortunately, since the flag needs to be a Python
variable, this slows down ``__getitem__`` too much in the more common
case where a copy of the indices is not needed. See the docstring for
``astropy.table._column_mixins`` for more information on that.
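        A minimal illustration of the dynamic-subclass pattern used below
        (generic Python, unrelated to any particular column class)::

            new_cls = type('_ListWithFlag', (list,), {'flagged': True})
            isinstance(new_cls([1, 2]), list)   # True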
"""
if cls in self._col_subclasses:
return self._col_subclasses[cls]
def __getitem__(self, item):
value = cls.__getitem__(self, item)
if type(value) is type(self):
value = self.info.slice_indices(value, item, len(self))
return value
clsname = '_{0}WithIndexCopy'.format(cls.__name__)
new_cls = type(str(clsname), (cls,), {'__getitem__': __getitem__})
self._col_subclasses[cls] = new_cls
return new_cls
class TableIndices(list):
'''
A special list of table indices allowing
for retrieval by column name(s).
Parameters
----------
lst : list
List of indices
'''
def __init__(self, lst):
super().__init__(lst)
def __getitem__(self, item):
'''
Retrieve an item from the list of indices.
Parameters
----------
item : int, str, tuple, or list
Position in list or name(s) of indexed column(s)
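        Examples
        --------
        A minimal sketch (assuming an indexed table ``t``; the column names
        are hypothetical)::

            t.indices['a']         # index on column 'a'
            t.indices[('a', 'b')]  # composite index on columns 'a' and 'b'
            t.indices[0]           # first index in the list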
'''
if isinstance(item, str):
item = [item]
if isinstance(item, (list, tuple)):
item = list(item)
for index in self:
try:
for name in item:
index.col_position(name)
if len(index.columns) == len(item):
return index
except ValueError:
pass
# index search failed
raise IndexError("No index found for {0}".format(item))
return super().__getitem__(item)
class TableLoc:
"""
A pseudo-list of Table rows allowing for retrieval
of rows by indexed column values.
Parameters
----------
table : Table
Indexed table to use
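    Examples
    --------
    A minimal sketch (hypothetical data; the table must already have an
    index for ``loc`` to work)::

        t = Table([[1, 2, 3], [10, 20, 30]], names=('a', 'b'))
        t.add_index('a')      # 'a' becomes the primary key
        row = t.loc[2]        # row whose 'a' value equals 2
        rows = t.loc[1:2]     # value slice; both endpoints are included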
"""
def __init__(self, table):
self.table = table
self.indices = table.indices
if len(self.indices) == 0:
raise ValueError("Cannot create TableLoc object with no indices")
def _get_rows(self, item):
"""
        Retrieve Table row indices by key value or value slice.
"""
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
if len(index.columns) > 1:
raise ValueError("Cannot use .loc on multi-column indices")
if isinstance(item, slice):
# None signifies no upper/lower bound
start = MinValue() if item.start is None else item.start
stop = MaxValue() if item.stop is None else item.stop
rows = index.range((start,), (stop,))
else:
if not isinstance(item, (list, np.ndarray)): # single element
item = [item]
# item should be a list or ndarray of values
rows = []
for key in item:
p = index.find((key,))
if len(p) == 0:
raise KeyError('No matches found for key {0}'.format(key))
else:
rows.extend(p)
return rows
def __getitem__(self, item):
"""
Retrieve Table rows by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError('No matches found for key {0}'.format(item))
elif len(rows) == 1: # single row
return self.table[rows[0]]
return self.table[rows]
def __setitem__(self, key, value):
"""
        Assign Table rows by value slice.
Parameters
----------
key : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
        value : object
            New values for the matched row(s).
            Can be a list of tuples/lists to update multiple rows.
"""
rows = self._get_rows(key)
if len(rows) == 0: # no matches found
raise KeyError('No matches found for key {0}'.format(key))
elif len(rows) == 1: # single row
self.table[rows[0]] = value
else: # multiple rows
if len(rows) == len(value):
for row, val in zip(rows, value):
self.table[row] = val
else:
raise ValueError('Right side should contain {0} values'.format(len(rows)))
class TableLocIndices(TableLoc):
def __getitem__(self, item):
"""
        Retrieve Table rows' indices by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError('No matches found for key {0}'.format(item))
elif len(rows) == 1: # single row
return rows[0]
return rows
class TableILoc(TableLoc):
'''
A variant of TableLoc allowing for row retrieval by
indexed order rather than data values.
Parameters
----------
table : Table
Indexed table to use
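    Examples
    --------
    A minimal sketch (assuming an indexed table ``t``)::

        t.iloc[0]      # first row in sorted index order
        t.iloc[:2]     # first two rows in sorted index order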
'''
def __init__(self, table):
super().__init__(table)
def __getitem__(self, item):
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
rows = index.sorted_data()[item]
table_slice = self.table[rows]
if len(table_slice) == 0: # no matches found
raise IndexError('Invalid index for iloc: {0}'.format(item))
return table_slice
|
035ca2b5e29361db608a675640761faef4555ff99a022b44a1d85b4b531d8669 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections.abc
import operator
import numpy as np
class Row:
"""A class to represent one row of a Table object.
A Row object is returned when a Table object is indexed with an integer
or when iterating over a table::
>>> from astropy.table import Table
>>> table = Table([(1, 2), (3, 4)], names=('a', 'b'),
... dtype=('int32', 'int32'))
>>> row = table[1]
>>> row
<Row index=1>
a b
int32 int32
----- -----
2 4
>>> row['a']
2
>>> row[1]
4
"""
def __init__(self, table, index):
self._table = table
self._index = operator.index(index)
n = len(table)
if index < -n or index >= n:
raise IndexError('index {0} out of range for table with length {1}'
.format(index, len(table)))
def __getitem__(self, item):
if self._table._is_list_or_tuple_of_str(item):
cols = [self._table[name] for name in item]
out = self._table.__class__(cols, copy=False)[self._index]
else:
out = self._table.columns[item][self._index]
return out
def __setitem__(self, item, val):
if self._table._is_list_or_tuple_of_str(item):
self._table._set_row(self._index, colnames=item, vals=val)
else:
self._table.columns[item][self._index] = val
def _ipython_key_completions_(self):
return self.colnames
def __eq__(self, other):
if self._table.masked:
# Sent bug report to numpy-discussion group on 2012-Oct-21, subject:
# "Comparing rows in a structured masked array raises exception"
# No response, so this is still unresolved.
raise ValueError('Unable to compare rows for masked table due to numpy.ma bug')
return self.as_void() == other
def __ne__(self, other):
if self._table.masked:
raise ValueError('Unable to compare rows for masked table due to numpy.ma bug')
return self.as_void() != other
def __array__(self, dtype=None):
"""Support converting Row to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
If the parent table is masked then the mask information is dropped.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
return np.asarray(self.as_void())
def __len__(self):
return len(self._table.columns)
def __iter__(self):
index = self._index
for col in self._table.columns.values():
yield col[index]
@property
def table(self):
return self._table
@property
def index(self):
return self._index
def as_void(self):
"""
Returns a *read-only* copy of the row values in the form of np.void or
np.ma.mvoid objects. This corresponds to the object types returned for
row indexing of a pure numpy structured array or masked array. This
        method is slow and its use is discouraged where performance matters.
Returns
-------
void_row : np.void (unmasked) or np.ma.mvoid (masked)
Copy of row values
"""
index = self._index
cols = self._table.columns.values()
vals = tuple(np.asarray(col)[index] for col in cols)
if self._table.masked:
# The logic here is a little complicated to work around
# bug in numpy < 1.8 (numpy/numpy#483). Need to build up
# a np.ma.mvoid object by hand.
from .table import descr
# Make np.void version of masks. Use the table dtype but
# substitute bool for data type
masks = tuple(col.mask[index] if hasattr(col, 'mask') else False
for col in cols)
descrs = (descr(col) for col in cols)
mask_dtypes = [(name, bool, shape) for name, type_, shape in descrs]
row_mask = np.array([masks], dtype=mask_dtypes)[0]
# Make np.void version of values, and then the final mvoid row
row_vals = np.array([vals], dtype=self.dtype)[0]
void_row = np.ma.mvoid(data=row_vals, mask=row_mask)
else:
void_row = np.array([vals], dtype=self.dtype)[0]
return void_row
@property
def meta(self):
return self._table.meta
@property
def columns(self):
return self._table.columns
@property
def colnames(self):
return self._table.colnames
@property
def dtype(self):
return self._table.dtype
def _base_repr_(self, html=False):
"""
Display row as a single-line table but with appropriate header line.
"""
index = self.index if (self.index >= 0) else self.index + len(self._table)
table = self._table[index:index + 1]
descr_vals = [self.__class__.__name__,
'index={0}'.format(self.index)]
if table.masked:
descr_vals.append('masked=True')
return table._base_repr_(html, descr_vals, max_width=-1,
tableid='table{0}'.format(id(self._table)))
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
index = self.index if (self.index >= 0) else self.index + len(self._table)
return '\n'.join(self.table[index:index + 1].pformat(max_width=-1))
def __bytes__(self):
return str(self).encode('utf-8')
collections.abc.Sequence.register(Row)
|
173607ebb474d8525de17a7fd486c457aaba414e41dac269ad4584aee4ab5480 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import platform
import warnings
import numpy as np
from .index import get_index_by_names
from ..utils.exceptions import AstropyUserWarning
__all__ = ['TableGroups', 'ColumnGroups']
def table_group_by(table, keys):
# index copies are unnecessary and slow down _table_group_by
with table.index_mode('discard_on_copy'):
return _table_group_by(table, keys)
def _table_group_by(table, keys):
"""
Get groups for ``table`` on specified ``keys``.
Parameters
----------
table : `Table`
Table to group
keys : str, list of str, `Table`, or Numpy array
Grouping key specifier
Returns
-------
grouped_table : Table object with groups attr set accordingly
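    Examples
    --------
    A minimal sketch (hypothetical data) using the public ``Table.group_by``
    interface, which calls this function internally::

        t = Table([['a', 'b', 'a'], [1, 2, 3]], names=('key', 'val'))
        tg = t.group_by('key')   # two groups: key == 'a' and key == 'b'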
"""
from .table import Table
from .serialize import _represent_mixins_as_columns
# Pre-convert string to tuple of strings, or Table to the underlying structured array
if isinstance(keys, str):
keys = (keys,)
if isinstance(keys, (list, tuple)):
for name in keys:
if name not in table.colnames:
raise ValueError('Table does not have key column {0!r}'.format(name))
if table.masked and np.any(table[name].mask):
raise ValueError('Missing values in key column {0!r} are not allowed'.format(name))
# Make a column slice of the table without copying
table_keys = table.__class__([table[key] for key in keys], copy=False)
# If available get a pre-existing index for these columns
table_index = get_index_by_names(table, keys)
grouped_by_table_cols = True
elif isinstance(keys, (np.ndarray, Table)):
table_keys = keys
if len(table_keys) != len(table):
raise ValueError('Input keys array length {0} does not match table length {1}'
.format(len(table_keys), len(table)))
table_index = None
grouped_by_table_cols = False
else:
raise TypeError('Keys input must be string, list, tuple, Table or numpy array, but got {0}'
.format(type(keys)))
# If there is not already an available index and table_keys is a Table then ensure
    # that all cols (including mixins) are in a form that can be sorted with the code below.
if not table_index and isinstance(table_keys, Table):
table_keys = _represent_mixins_as_columns(table_keys)
# Get the argsort index `idx_sort`, accounting for particulars
try:
# take advantage of index internal sort if possible
if table_index is not None:
idx_sort = table_index.sorted_data()
else:
idx_sort = table_keys.argsort(kind='mergesort')
stable_sort = True
except TypeError:
# Some versions (likely 1.6 and earlier) of numpy don't support
# 'mergesort' for all data types. MacOSX (Darwin) doesn't have a stable
# sort by default, nor does Windows, while Linux does (or appears to).
idx_sort = table_keys.argsort()
stable_sort = platform.system() not in ('Darwin', 'Windows')
# Finally do the actual sort of table_keys values
table_keys = table_keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], table_keys[1:] != table_keys[:-1], [True]))
indices = np.flatnonzero(diffs)
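    # For example, sorted keys [1, 1, 2, 5, 5] give
    # diffs = [True, False, True, True, False, True], so indices = [0, 2, 3, 5]
    # and group i spans rows indices[i]:indices[i+1] of the sorted table.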
# If the sort is not stable (preserves original table order) then sort idx_sort in
# place within each group.
if not stable_sort:
for i0, i1 in zip(indices[:-1], indices[1:]):
idx_sort[i0:i1].sort()
# Make a new table and set the _groups to the appropriate TableGroups object.
# Take the subset of the original keys at the indices values (group boundaries).
out = table.__class__(table[idx_sort])
out_keys = table_keys[indices[:-1]]
if isinstance(out_keys, Table):
out_keys.meta['grouped_by_table_cols'] = grouped_by_table_cols
out._groups = TableGroups(out, indices=indices, keys=out_keys)
return out
def column_group_by(column, keys):
"""
Get groups for ``column`` on specified ``keys``
Parameters
----------
column : Column object
Column to group
keys : Table or Numpy array of same length as col
Grouping key specifier
Returns
-------
grouped_column : Column object with groups attr set accordingly
"""
from .table import Table
from .serialize import _represent_mixins_as_columns
if isinstance(keys, Table):
keys = _represent_mixins_as_columns(keys)
keys = keys.as_array()
if not isinstance(keys, np.ndarray):
raise TypeError('Keys input must be numpy array, but got {0}'
.format(type(keys)))
if len(keys) != len(column):
raise ValueError('Input keys array length {0} does not match column length {1}'
.format(len(keys), len(column)))
idx_sort = keys.argsort()
keys = keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], keys[1:] != keys[:-1], [True]))
indices = np.flatnonzero(diffs)
# Make a new column and set the _groups to the appropriate ColumnGroups object.
# Take the subset of the original keys at the indices values (group boundaries).
out = column.__class__(column[idx_sort])
out._groups = ColumnGroups(out, indices=indices, keys=keys[indices[:-1]])
return out
class BaseGroups:
"""
A class to represent groups within a table of heterogeneous data.
- ``keys``: key values corresponding to each group
- ``indices``: index values in parent table or column corresponding to group boundaries
- ``aggregate()``: method to create new table by aggregating within groups
"""
@property
def parent(self):
return self.parent_column if isinstance(self, ColumnGroups) else self.parent_table
def __iter__(self):
self._iter_index = 0
return self
def next(self):
ii = self._iter_index
if ii < len(self.indices) - 1:
i0, i1 = self.indices[ii], self.indices[ii + 1]
self._iter_index += 1
return self.parent[i0:i1]
else:
raise StopIteration
__next__ = next
def __getitem__(self, item):
parent = self.parent
if isinstance(item, (int, np.integer)):
i0, i1 = self.indices[item], self.indices[item + 1]
out = parent[i0:i1]
out.groups._keys = parent.groups.keys[item]
else:
indices0, indices1 = self.indices[:-1], self.indices[1:]
try:
i0s, i1s = indices0[item], indices1[item]
except Exception:
raise TypeError('Index item for groups attribute must be a slice, '
'numpy mask or int array')
mask = np.zeros(len(parent), dtype=bool)
# Is there a way to vectorize this in numpy?
for i0, i1 in zip(i0s, i1s):
mask[i0:i1] = True
out = parent[mask]
out.groups._keys = parent.groups.keys[item]
out.groups._indices = np.concatenate([[0], np.cumsum(i1s - i0s)])
return out
def __repr__(self):
return '<{0} indices={1}>'.format(self.__class__.__name__, self.indices)
def __len__(self):
return len(self.indices) - 1
class ColumnGroups(BaseGroups):
def __init__(self, parent_column, indices=None, keys=None):
self.parent_column = parent_column # parent Column
self.parent_table = parent_column.parent_table
self._indices = indices
self._keys = keys
@property
def indices(self):
# If the parent column is in a table then use group indices from table
if self.parent_table:
return self.parent_table.groups.indices
else:
if self._indices is None:
return np.array([0, len(self.parent_column)])
else:
return self._indices
@property
def keys(self):
# If the parent column is in a table then use group indices from table
if self.parent_table:
return self.parent_table.groups.keys
else:
return self._keys
def aggregate(self, func):
from .column import MaskedColumn
i0s, i1s = self.indices[:-1], self.indices[1:]
par_col = self.parent_column
masked = isinstance(par_col, MaskedColumn)
reduceat = hasattr(func, 'reduceat')
sum_case = func is np.sum
mean_case = func is np.mean
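        # For unmasked data and common reductions, ufunc.reduceat computes the
        # per-group reduction directly from the group start indices i0s, e.g.
        # np.add.reduceat([1, 2, 3, 4], [0, 2]) -> [3, 7] (per-group sums).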
try:
if not masked and (reduceat or sum_case or mean_case):
if mean_case:
vals = np.add.reduceat(par_col, i0s) / np.diff(self.indices)
else:
if sum_case:
func = np.add
vals = func.reduceat(par_col, i0s)
else:
vals = np.array([func(par_col[i0: i1]) for i0, i1 in zip(i0s, i1s)])
except Exception:
raise TypeError("Cannot aggregate column '{0}' with type '{1}'"
.format(par_col.info.name,
par_col.info.dtype))
out = par_col.__class__(data=vals,
name=par_col.info.name,
description=par_col.info.description,
unit=par_col.info.unit,
format=par_col.info.format,
meta=par_col.info.meta)
return out
def filter(self, func):
"""
Filter groups in the Column based on evaluating function ``func`` on each
group sub-table.
The function which is passed to this method must accept one argument:
- ``column`` : `Column` object
It must then return either `True` or `False`. As an example, the following
will select all column groups with only positive values::
def all_positive(column):
if np.any(column < 0):
return False
return True
Parameters
----------
func : function
Filter function
Returns
-------
out : Column
New column with the aggregated rows.
"""
mask = np.empty(len(self), dtype=bool)
for i, group_column in enumerate(self):
mask[i] = func(group_column)
return self[mask]
class TableGroups(BaseGroups):
def __init__(self, parent_table, indices=None, keys=None):
self.parent_table = parent_table # parent Table
self._indices = indices
self._keys = keys
@property
def key_colnames(self):
"""
Return the names of columns in the parent table that were used for grouping.
"""
# If the table was grouped by key columns *in* the table then treat those columns
# differently in aggregation. In this case keys will be a Table with
# keys.meta['grouped_by_table_cols'] == True. Keys might not be a Table so we
# need to handle this.
grouped_by_table_cols = getattr(self.keys, 'meta', {}).get('grouped_by_table_cols', False)
return self.keys.colnames if grouped_by_table_cols else ()
@property
def indices(self):
if self._indices is None:
return np.array([0, len(self.parent_table)])
else:
return self._indices
def aggregate(self, func):
"""
Aggregate each group in the Table into a single row by applying the reduction
function ``func`` to group values in each column.
Parameters
----------
func : function
Function that reduces an array of values to a single value
Returns
-------
out : Table
New table with the aggregated rows.
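        Examples
        --------
        A minimal sketch (hypothetical data)::

            t = Table([['a', 'a', 'b'], [1, 2, 3]], names=('key', 'val'))
            tg = t.group_by('key')
            agg = tg.groups.aggregate(np.sum)   # one row per key value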
"""
i0s, i1s = self.indices[:-1], self.indices[1:]
out_cols = []
parent_table = self.parent_table
for col in parent_table.columns.values():
# For key columns just pick off first in each group since they are identical
if col.info.name in self.key_colnames:
new_col = col.take(i0s)
else:
try:
new_col = col.groups.aggregate(func)
except TypeError as err:
warnings.warn(str(err), AstropyUserWarning)
continue
out_cols.append(new_col)
return parent_table.__class__(out_cols, meta=parent_table.meta)
def filter(self, func):
"""
Filter groups in the Table based on evaluating function ``func`` on each
group sub-table.
The function which is passed to this method must accept two arguments:
- ``table`` : `Table` object
- ``key_colnames`` : tuple of column names in ``table`` used as keys for grouping
It must then return either `True` or `False`. As an example, the following
will select all table groups with only positive values in the non-key columns::
def all_positive(table, key_colnames):
colnames = [name for name in table.colnames if name not in key_colnames]
for colname in colnames:
if np.any(table[colname] < 0):
return False
return True
Parameters
----------
func : function
Filter function
Returns
-------
out : Table
New table with the aggregated rows.
"""
mask = np.empty(len(self), dtype=bool)
key_colnames = self.key_colnames
for i, group_table in enumerate(self):
mask[i] = func(group_table, key_colnames)
return self[mask]
@property
def keys(self):
return self._keys
|
4ddf816436c6fcfd5e48a6c0fec01a15f1df7df4272429e3f5fa8192d1f24a82 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .index import TableIndices, TableLoc, TableILoc, TableLocIndices
import re
import sys
from collections import OrderedDict
from collections.abc import Mapping
import warnings
from copy import deepcopy
import numpy as np
from numpy import ma
from .. import log
from ..io import registry as io_registry
from ..units import Quantity, QuantityInfo
from ..utils import isiterable, ShapedLikeNDArray
from ..utils.console import color_print
from ..utils.metadata import MetaData
from ..utils.data_info import BaseColumnInfo, MixinInfo, ParentDtypeInfo, DataInfo
from ..utils.exceptions import AstropyDeprecationWarning, NoValue
from . import groups
from .pprint import TableFormatter
from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray,
col_copy)
from .row import Row
from .np_utils import fix_column_name, recarray_fromrecords
from .info import TableInfo, serialize_method_as
from .index import Index, _IndexModeContext, get_index
from . import conf
__doctest_skip__ = ['Table.read', 'Table.write',
'Table.convert_bytestring_to_unicode',
'Table.convert_unicode_to_bytestring',
]
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
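    For example, a plain 64-bit integer column named 'a' would yield
    ``('a', dtype('int64'), ())``.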
"""
col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, 'shape') else ()
return (col.info.name, col_dtype, col_shape)
def has_info_class(obj, cls):
return hasattr(obj, 'info') and isinstance(obj.info, cls)
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return self.values()[item]
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.values()[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError('Illegal key or index value for {} object'
.format(self.__class__.__name__))
def __setitem__(self, item, value):
if item in self:
raise ValueError("Cannot replace column '{0}'. Use Table.replace_column() instead."
.format(item))
super().__setitem__(item, value)
def __repr__(self):
names = ("'{0}'".format(x) for x in self.keys())
return "<{1} names=({0})>".format(",".join(names), self.__class__.__name__)
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError("Column {0} already exists".format(new_name))
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
# Define keys and values for Python 2 and 3 source compatibility
def keys(self):
return list(OrderedDict.keys(self))
def values(self):
return list(OrderedDict.values(self))
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are instances of given classes.
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
class Table:
"""A class to represent tables of heterogeneous data.
`Table` provides a class for heterogeneous tabular data, making use of a
`numpy` structured array internally to store the data values. A key
enhancement provided by the `Table` class is the ability to easily modify
the structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`Table` differs from `~astropy.nddata.NDData` by the assumption that the
input data consists of columns of homogeneous data, where each column
has a unique identifier and may contain additional metadata such as the
data unit, format, and description.
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
meta = MetaData()
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
def as_array(self, keep_byteorder=False):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
Returns
-------
table_array : np.ndarray (unmasked) or np.ma.MaskedArray (masked)
Copy of table as a numpy structured array
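        Examples
        --------
        A minimal sketch (hypothetical data)::

            t = Table([[1, 2], [3.0, 4.0]], names=('a', 'b'))
            arr = t.as_array()   # structured ndarray copy
            arr['a']             # array([1, 2])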
"""
if len(self.columns) == 0:
return None
sys_byteorder = ('>', '<')[sys.byteorder == 'little']
native_order = ('=', sys_byteorder)
dtype = []
cols = self.columns.values()
for col in cols:
col_descr = descr(col)
byteorder = col.info.dtype.byteorder
if not keep_byteorder and byteorder not in native_order:
new_dt = np.dtype(col_descr[1]).newbyteorder('=')
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
empty_init = ma.empty if self.masked else np.empty
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
return data
def __init__(self, data=None, masked=None, names=None, dtype=None,
meta=None, copy=True, rows=None, copy_indices=True,
**kwargs):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.meta = meta
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError('Cannot specify dtype when copy=False')
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
is_list_of_dict = False
if rows is not None:
if data is not None:
raise ValueError('Cannot supply both `data` and `rows` values')
if all(isinstance(row, dict) for row in rows):
is_list_of_dict = True # Avoid doing the all(...) test twice.
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
rec_data = recarray_fromrecords(rows)
data = [rec_data[name] for name in rec_data.dtype.names]
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
if hasattr(data, '__astropy_table__'):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied (though the meta
# will be deep-copied anyway).
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError('__init__() got unexpected keyword argument {!r}'
.format(list(kwargs.keys())[0]))
if (isinstance(data, np.ndarray) and
data.shape == (0,) and
not data.dtype.names):
data = None
if isinstance(data, self.Row):
data = data._table[data._index:data._index + 1]
if isinstance(data, (list, tuple)):
init_func = self._init_from_list
if data and (is_list_of_dict or all(isinstance(row, dict) for row in data)):
n_cols = len(data[0])
else:
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError('Can not initialize a Table with a scalar')
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
init_func = self._init_from_table
n_cols = len(data.colnames)
default_names = data.colnames
# don't copy indices if the input Table is in non-copy mode
self._init_indices = self._init_indices and data._copy_indices
elif data is None:
if names is None:
if dtype is None:
return # Empty table
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError('dtype was specified but could not be '
'parsed for column names')
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError('Data type {0} not allowed to init Table'
.format(type(data)))
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if names is None:
names = default_names or [None] * n_cols
if dtype is None:
dtype = [None] * n_cols
# Numpy does not support bytes column names on Python 3, so fix them
# up now.
names = [fix_column_name(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Whatever happens above, the masked property should be set to a boolean
if type(self.masked) is not bool:
raise TypeError("masked property has not been set to True or False")
def __getstate__(self):
columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items())
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked:
mask_table = Table([col.mask for col in self.columns.values()],
names=self.colnames, copy=False)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property."""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled.
        If an input ``fill_value`` is supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
        fill_value : object, optional
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : Table
New table with masked values filled
"""
if self.masked:
data = [col.filled(fill_value) for col in self.columns.values()]
else:
data = self
return self.__class__(data, meta=deepcopy(self.meta))
@property
def indices(self):
'''
Return the indices associated with columns of the table
as a TableIndices object.
'''
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum([index is x for x in lst]) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
'''
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
'''
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
'''
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
'''
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
'''
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, from among SortedArray, BST,
FastBST, FastRBT, and SCEngine. If the supplied argument is None
(by default), use SortedArray.
unique : bool
Whether the values of the index must be unique. Default is False.
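        Examples
        --------
        A minimal sketch (hypothetical column names)::

            t = Table([[2, 1, 3], [6, 4, 5]], names=('a', 'b'))
            t.add_index('a')           # single-column index; primary key
            t.add_index(['a', 'b'])    # composite index on two columns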
'''
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, '_supports_indexing', False):
raise ValueError('Cannot create an index on column "{0}", of '
'type "{1}"'.format(col.info.name, type(col)))
index = Index(columns, engine=engine, unique=unique)
if not self.indices:
self.primary_key = colnames
for col in columns:
col.info.indices.append(index)
def remove_indices(self, colname):
'''
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
'''
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
'''
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
return _IndexModeContext(self, mode)
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
return self.as_array().data if self.masked else self.as_array()
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):
if not isiterable(inp_list):
raise ValueError('{0} must be a list or None'.format(inp_str))
if len(names) != n_cols or len(dtype) != n_cols:
            raise ValueError(
                'Arguments "names" and "dtype" must match number of columns')
def _set_masked_from_cols(self, cols):
if self.masked is None:
if any(isinstance(col, (MaskedColumn, ma.MaskedArray)) for col in cols):
self._set_masked(True)
else:
self._set_masked(False)
elif not self.masked:
if any(np.any(col.mask) for col in cols if isinstance(col, (MaskedColumn, ma.MaskedArray))):
self._set_masked(True)
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
names_from_data = set()
for row in data:
names_from_data.update(row)
cols = {}
for name in names_from_data:
cols[name] = []
for i, row in enumerate(data):
try:
cols[name].append(row[name])
except KeyError:
raise ValueError('Row {0} has no value for column {1}'.format(i, name))
if all(name is None for name in names):
names = sorted(names_from_data)
self._init_from_dict(cols, names, dtype, n_cols, copy)
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of columns. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
if data and all(isinstance(row, dict) for row in data):
self._init_from_list_of_dicts(data, names, dtype, n_cols, copy)
return
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(data)
cols = []
def_names = _auto_names(n_cols)
for col, name, def_name, dtype in zip(data, names, def_names, dtype):
# Structured ndarray gets viewed as a mixin unless already a valid
# mixin class
if (isinstance(col, np.ndarray) and len(col.dtype) > 1 and
not self._add_as_mixin_column(col)):
col = col.view(NdarrayMixin)
if isinstance(col, (Column, MaskedColumn)):
col = self.ColumnClass(name=(name or col.info.name or def_name),
data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
elif self._add_as_mixin_column(col):
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute.
if copy:
col = col_copy(col, copy_indices=self._init_indices)
col.info.name = name or col.info.name or def_name
elif isinstance(col, np.ndarray) or isiterable(col):
col = self.ColumnClass(name=(name or def_name), data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
else:
raise ValueError('Elements in list initialization must be '
'either Column or list-like')
cols.append(col)
self._init_from_cols(cols)
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array"""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = ([data[name] for name in data_names] if struct else
[data[:, i] for i in range(n_cols)])
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(cols)
if copy:
self._init_from_list(cols, names, dtype, n_cols, copy)
else:
dtype = [(name, col.dtype, col.shape[1:]) for name, col in zip(names, cols)]
newdata = data.view(dtype).ravel()
columns = self.TableColumns()
for name in names:
columns[name] = self.ColumnClass(name=name, data=newdata[name])
columns[name].info.parent_table = self
self.columns = columns
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns"""
# TODO: is this restriction still needed with no ndarray?
if not copy:
raise ValueError('Cannot use copy=False with a dict data input')
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _init_from_table(self, data, names, dtype, n_cols, copy):
"""Initialize table from an existing Table object """
table = data # data is really a Table, rename for clarity
self.meta.clear()
self.meta.update(deepcopy(table.meta))
self.primary_key = table.primary_key
cols = list(table.columns.values())
self._init_from_list(cols, names, dtype, n_cols, copy)
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if col.__class__ is not self.ColumnClass and isinstance(col, Column):
col = self.ColumnClass(col) # copy attributes and reference data
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects"""
lengths = set(len(col) for col in cols)
if len(lengths) != 1:
raise ValueError('Inconsistent data column lengths: {0}'
.format(lengths))
# Set the table masking
self._set_masked_from_cols(cols)
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
table.meta.clear()
table.meta.update(deepcopy(self.meta))
table.primary_key = self.primary_key
cols = self.columns.values()
newcols = []
for col in cols:
col.info._copy_indices = self._copy_indices
newcol = col[slice_]
if col.info.indices:
newcol = col.info.slice_indices(newcol, slice_, len(col))
newcols.append(newcol)
col.info._copy_indices = True
self._make_table_from_cols(table, newcols)
return table
@staticmethod
def _make_table_from_cols(table, cols):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
colnames = set(col.info.name for col in cols)
if None in colnames:
raise TypeError('Cannot have None for column name')
if len(colnames) != len(cols):
raise ValueError('Duplicate column names')
columns = table.TableColumns((col.info.name, col) for col in cols)
for col in cols:
col.info.parent_table = table
if table.masked and not hasattr(col, 'mask'):
col.mask = FalseArray(col.shape)
table.columns = columns
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(self, html=False, descr_vals=None, max_width=None,
tableid=None, show_dtype=True, max_lines=None,
tableclass=None):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append('masked=True')
descr_vals.append('length={0}'.format(len(self)))
descr = ' '.join(descr_vals)
if html:
from ..utils.xml.writer import xml_escape
descr = '<i>{0}</i>\n'.format(xml_escape(descr))
else:
descr = '<{0}>\n'.format(descr)
if tableid is None:
tableid = 'table{id}'.format(id=id(self))
data_lines, outs = self.formatter._pformat_table(
self, tableid=tableid, html=html, max_width=max_width,
show_name=True, show_unit=None, show_dtype=show_dtype,
max_lines=max_lines, tableclass=tableclass)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True, max_width=-1,
tableclass=conf.default_notebook_table_class)
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return '\n'.join(self.pformat())
def __bytes__(self):
return str(self).encode('utf-8')
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
        # Is it a mixin but not Quantity (which gets converted to Column with
# unit set).
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
def pprint(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
Parameters
----------
max_lines : int
Maximum number of lines in table output.
max_width : int or `None`
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
            Include a header row for column dtypes. Default is False.
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
lines, outs = self.formatter._pformat_table(self, max_lines, max_width,
show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + self.columns.values(),
copy=False)
else:
return self
def show_in_notebook(self, tableid=None, css=None, display_length=50,
table_class='astropy-default', show_row_index='idx'):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or `None`
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <http://getbootstrap.com/css/#tables>`_
for the list of classes.
css : string
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
            Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from .jsviewer import JSViewer
from IPython.display import HTML
if tableid is None:
tableid = 'table{0}-{1}'.format(id(self),
np.random.randint(1, 1e6))
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == 'astropy-default':
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid,
max_lines=-1, show_dtype=False,
tableclass=table_class)
columns = display_table.columns.values()
sortable_columns = [i for i, col in enumerate(columns)
if col.dtype.kind in 'iufc']
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(self, max_lines=5000, jsviewer=False,
browser='default', jskwargs={'use_local_files': True},
tableid=None, table_class="display compact",
css=None, show_row_index='idx'):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or `None`
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : string
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import webbrowser
import tempfile
from .jsviewer import DEFAULT_CSS
from urllib.parse import urljoin
from urllib.request import pathname2url
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'table.html')
with open(path, 'w') as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(tmp, format='jsviewer', css=css,
max_lines=max_lines, jskwargs=jskwargs,
table_id=tableid, table_class=table_class)
else:
self.write(tmp, format='html')
try:
br = webbrowser.get(None if browser == 'default' else browser)
except webbrowser.Error:
log.error("Browser '{}' not found.".format(browser))
else:
br.open(urljoin('file:', pathname2url(path)))
def pformat(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
Parameters
----------
max_lines : int or `None`
Maximum number of rows to output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
            Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or `None`
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or `None`
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
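        Examples
        --------
        A short example, mirroring the tables used elsewhere in this module::
            >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
            >>> for line in t.pformat():
            ...     print(line)
             a   b
            --- ---
              1 0.1
              2 0.2
              3 0.3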
"""
lines, outs = self.formatter._pformat_table(
self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype, html=html,
tableid=tableid, tableclass=tableclass, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
return lines
def more(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
            Include a header row for column dtypes. Default is False.
"""
self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype)
def __getitem__(self, item):
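        # Dispatch on the type of ``item``; an illustrative summary:
        #   t['a']          -> Column
        #   t[1]            -> Row
        #   t[['a', 'b']]   -> new Table with just those columns
        #   t[1:3], t[mask] -> new Table with the selected rows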
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__([self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
return out
elif ((isinstance(item, np.ndarray) and item.size == 0) or
(isinstance(item, (tuple, list)) and not item)):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
        elif (isinstance(item, slice) or
              isinstance(item, np.ndarray) or
              isinstance(item, list) or
              (isinstance(item, tuple) and
               all(isinstance(x, np.ndarray) for x in item))):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
NewColumn = self.MaskedColumn if self.masked else self.Column
# If value doesn't have a dtype and won't be added as a mixin then
# convert to a numpy array.
if not hasattr(value, 'dtype') and not self._add_as_mixin_column(value):
value = np.asarray(value)
# Structured ndarray gets viewed as a mixin (unless already a valid
# mixin class).
if (isinstance(value, np.ndarray) and len(value.dtype) > 1 and
not self._add_as_mixin_column(value)):
value = value.view(NdarrayMixin)
# Make new column and assign the value. If the table currently
            # has no rows (len=0) or the value is already a Column then
# define new column directly from value. In the latter case
# this allows for propagation of Column metadata. Otherwise
# define a new column with the right length and shape and then
# set it from value. This allows for broadcasting, e.g. t['a']
# = 1.
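            # For instance (an illustrative sketch):
            #     t = Table([[1, 2, 3]], names=('b',))
            #     t['a'] = 1.0   # broadcast to Column([1.0, 1.0, 1.0])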
name = item
# If this is a column-like object that could be added directly to table
if isinstance(value, BaseColumn) or self._add_as_mixin_column(value):
# If we're setting a new column to a scalar, broadcast it.
# (things will fail in _init_from_cols if this doesn't work)
if (len(self) > 0 and (getattr(value, 'isscalar', False) or
getattr(value, 'shape', None) == () or
len(value) == 1)):
new_shape = (len(self),) + getattr(value, 'shape', ())[1:]
if isinstance(value, np.ndarray):
value = np.broadcast_to(value, shape=new_shape,
subok=True)
elif isinstance(value, ShapedLikeNDArray):
value = value._apply(np.broadcast_to, shape=new_shape,
subok=True)
new_column = col_copy(value)
new_column.info.name = name
elif len(self) == 0:
new_column = NewColumn(value, name=name)
else:
new_column = NewColumn(name=name, length=len(self), dtype=value.dtype,
shape=value.shape[1:],
unit=getattr(value, 'unit', None))
new_column[:] = value
# Now add new column to the table
self.add_columns([new_column], copy=False)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (not getattr(self, '_setitem_inplace', False)
and not conf.replace_inplace):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
(isinstance(item, tuple) and # output from np.where
all(isinstance(x, np.ndarray) for x in item))):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
import itertools
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError('Right side value needs {0} elements (one for each column)'
.format(n_cols))
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif (isinstance(item, (list, tuple, np.ndarray)) and
all(isinstance(x, str) for x in item)):
self.remove_columns(item)
elif (isinstance(item, (list, np.ndarray)) and
np.asarray(item).dtype.kind == 'i'):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError('illegal key or index value')
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)'
' to convert to a masked table)')
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if hasattr(self, '_masked'):
# The only allowed change is from None to False or True, or False to True
if self._masked is None and masked in [False, True]:
self._masked = masked
elif self._masked is False and masked is True:
log.info("Upgrading Table to masked Table. Use Table.filled() to convert to unmasked table.")
self._masked = masked
elif self._masked is masked:
raise Exception("Masked attribute is already set to {0}".format(masked))
else:
raise Exception("Cannot change masked attribute to {0} once it is set to {1}"
.format(masked, self._masked))
else:
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
if self._masked:
self._column_class = self.MaskedColumn
else:
self._column_class = self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings"""
return (isinstance(names, (tuple, list)) and names and
all(isinstance(x, str) for x in names))
def keys(self):
return list(self.columns.keys())
def __len__(self):
if len(self.columns) == 0:
return 0
lengths = set(len(col) for col in self.columns.values())
if len(lengths) != 1:
len_strs = [' {0} : {1}'.format(name, len(col)) for name, col in self.columns.items()]
raise ValueError('Column length mismatch:\n{0}'.format('\n'.join(len_strs)))
return lengths.pop()
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError("Column {0} does not exist".format(name))
def add_column(self, col, index=None, name=None, rename_duplicate=False, copy=True):
"""
Add a new Column object ``col`` to the table. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
Parameters
----------
col : Column
Column object to add.
index : int or `None`
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
            Uniquify column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create a third column 'c' and append it to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> col_d = Column(name='d', data=['a', 'b', 'c'])
>>> t.add_column(col_d, 1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
3 c 0.3 z
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> t.add_column(col_b, rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.2
3 0.3 1.3
        Add an unnamed column or mixin object to the table using a default
        name or by specifying an explicit name with ``name``. The name can
        also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(data=['x', 'y'])
>>> t.add_column(col_c)
>>> t.add_column(col_c, name='c')
>>> col_b = Column(name='b', data=[1.1, 1.2])
>>> t.add_column(col_b, name='d')
>>> print(t)
a b col2 c d
--- --- ---- --- ---
1 0.1 x x 1.1
2 0.2 y y 1.2
To add several columns use add_columns.
"""
if index is None:
index = len(self.columns)
if name is not None:
name = (name,)
self.add_columns([col], [index], name, copy=copy, rename_duplicate=rename_duplicate)
def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False):
"""
Add a list of new Column objects ``cols`` to the table. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
Parameters
----------
cols : list of Columns
Column objects to add.
indexes : list of ints or `None`
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create column 'c' and 'd' and append them to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
3 0.3 z w
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d], [0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
z 3 w 0.3
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_columns([col_b, col_c], rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
3 0.3 1.3 z
        Add unnamed columns or mixin objects to the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_a = Column(data=['x', 'y'])
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([col_a, col_b])
>>> t.add_columns([col_a, col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
x u x u
y v y v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError('Number of indexes must match number of cols')
if copy:
cols = [col_copy(col) for col in cols]
if len(self.columns) == 0:
# No existing table data, init from cols
newcols = cols
else:
newcols = list(self.columns.values())
new_indexes = list(range(len(newcols) + 1))
for col, index in zip(cols, indexes):
i = new_indexes.index(index)
new_indexes.insert(i, None)
newcols.insert(i, col)
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError('Number of names must match number of cols')
for i, (col, name) in enumerate(zip(cols, names)):
if name is None:
if col.info.name is not None:
continue
name = 'col{}'.format(i + len(self.columns))
if col.info.parent_table is not None:
col = col_copy(col)
col.info.name = name
if rename_duplicate:
existing_names = set(self.colnames)
for col in cols:
i = 1
orig_name = col.info.name
if col.info.name in existing_names:
# If the column belongs to another table then copy it
# before renaming
while col.info.name in existing_names:
# Iterate until a unique name is found
if col.info.parent_table is not None:
col = col_copy(col)
new_name = '{0}_{1}'.format(orig_name, i)
col.info.name = new_name
i += 1
existing_names.add(new_name)
self._init_from_cols(newcols)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
if 'refcount' in warns and name in self.colnames:
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if 'always' in warns:
warnings.warn("replaced column '{}'".format(name),
TableReplaceWarning, stacklevel=3)
if 'slice' in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = ("replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
if 'refcount' in warns:
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = ("replaced column '{}' and the number of references "
"to the column changed.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if 'attributes' in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = ("replaced column '{}' and column attributes {} changed."
.format(name, changed_attrs))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col):
"""
Replace column ``name`` with the new ``col`` object.
Parameters
----------
name : str
Name of column to replace
col : column object (list, ndarray, Column, etc)
New column object to replace the existing column
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError('column name {0} is not in the table'.format(name))
if self[name].info.indices:
raise ValueError('cannot replace a table index column')
t = self.__class__([col], names=[name])
cols = OrderedDict(self.columns)
cols[name] = t[name]
self._init_from_cols(cols.values())
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice, int, or array of ints
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
'''
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
        This is equivalent to using ``remove_column``.
'''
if isinstance(names, str):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
for name in names:
self.columns.pop(name)
def _convert_string_dtype(self, in_kind, out_kind):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
"""
# If there are no `in_kind` columns then do nothing
cols = self.columns.values()
if not any(col.dtype.kind == in_kind for col in cols):
return
newcols = []
for col in cols:
if col.dtype.kind == in_kind:
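                # e.g. for in_kind='S', out_kind='U' this maps a dtype.str
                # like '|S3' to '|U3'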
newdtype = re.sub(in_kind, out_kind, col.dtype.str)
newcol = col.__class__(col, dtype=newdtype)
else:
newcol = col
newcols.append(newcol)
self._init_from_cols(newcols)
def convert_bytestring_to_unicode(self, python3_only=NoValue):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U') assuming
ASCII encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
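        Examples
        --------
        A minimal example::
            >>> t = Table([[b'abc', b'de']], names=('a',))
            >>> t['a'].dtype.kind
            'S'
            >>> t.convert_bytestring_to_unicode()
            >>> t['a'].dtype.kind
            'U'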
"""
if python3_only is not NoValue:
warnings.warn('The "python3_only" keyword is now deprecated.',
AstropyDeprecationWarning)
self._convert_string_dtype('S', 'U')
def convert_unicode_to_bytestring(self, python3_only=NoValue):
"""
Convert ASCII-only unicode columns (dtype.kind='U') to bytestring (dtype.kind='S').
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings. This routine takes
advantage of numpy automated conversion which works for strings that
are pure ASCII.
"""
if python3_only is not NoValue:
warnings.warn('The "python3_only" keyword is now deprecated.',
AstropyDeprecationWarning)
self._convert_string_dtype('U', 'S')
def keep_columns(self, names):
'''
Keep only the columns specified (remove the others).
Parameters
----------
names : list
A list containing the names of the columns to keep. All other
columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Specifying only a single column name keeps only this column.
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
        Specifying a list of column names is also possible.
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
'''
if isinstance(names, str):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
remove = list(set(self.keys()) - set(names))
self.remove_columns(remove)
def rename_column(self, name, new_name):
'''
Rename a column.
        This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
'''
if name not in self.keys():
raise KeyError("Column {0} does not exist".format(name))
self.columns[name].info.name = new_name
def _set_row(self, idx, colnames, vals):
        # Use an explicit length check rather than ``assert`` (which is
        # skipped under ``python -O``), and catch values that have no length.
        try:
            n_vals = len(vals)
        except TypeError:
            n_vals = None
        if n_vals != len(colnames):
            raise ValueError('right hand side must be a sequence of values with '
                             'the same length as the number of selected columns')
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
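        Examples
        --------
        A minimal example, inserting a row before index 1::
            >>> t = Table([[1, 2], [4, 5]], names=('a', 'b'))
            >>> t.insert_row(1, [9, 9])
            >>> t['a'].tolist()
            [1, 9, 2]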
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError("Index {0} is out of bounds for table with length {1}"
.format(index, N))
if index < 0:
index += N
def _is_mapping(obj):
"""Minimal checker for mapping (dict-like) interface for obj"""
attrs = ('__getitem__', '__len__', '__iter__', 'keys', 'values', 'items')
return all(hasattr(obj, attr) for attr in attrs)
if mask is not None and not self.masked:
# Possibly issue upgrade warning and update self.ColumnClass. This
# does not change the existing columns.
self._set_masked(True)
if _is_mapping(vals) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not _is_mapping(mask):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError('keys in mask should match keys in vals')
if vals and any(name not in colnames for name in vals):
raise ValueError('Keys in vals must all be valid column names')
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, 'dtype'):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError("Value must be supplied for column '{0}'".format(name))
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or _is_mapping(mask)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError('Mismatch between number of vals and columns')
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError('Mismatch between number of masks and columns')
else:
mask = [False] * len(self.columns)
else:
raise TypeError('Vals must be an iterable or mapping or None')
columns = self.TableColumns()
try:
# Insert val at index for each column
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
# If the new row caused a change in self.ColumnClass then
# Column-based classes need to be converted first. This is
# typical for adding a row with mask values to an unmasked table.
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col = self.ColumnClass(col, copy=False)
newcol = col.insert(index, val, axis=0)
if not isinstance(newcol, BaseColumn):
newcol.info.name = name
if self.masked:
newcol.mask = FalseArray(newcol.shape)
if len(newcol) != N + 1:
raise ValueError('Incorrect length for column {0} after inserting {1}'
' (expected {2}, got {3})'
                                     .format(name, val, N + 1, len(newcol)))
newcol.info.parent_table = self
# Set mask if needed
if self.masked:
newcol.mask[index] = mask_
columns[name] = newcol
# insert row in indices
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
except Exception as err:
raise ValueError("Unable to insert row because of exception in column '{0}':\n{1}"
.format(name, err))
else:
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def _replace_cols(self, columns):
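        # Swap in the new columns, re-pointing any table indices from each
        # old column object to its replacement.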
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def argsort(self, keys=None, kind=None):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
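        Examples
        --------
        A minimal example; the returned indices sort the table by column 'a'::
            >>> t = Table([[3, 1, 2]], names=('a',))
            >>> idx = t.argsort('a')
            >>> t['a'][idx].tolist()
            [1, 2, 3]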
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, self[keys])
if index is not None:
return index.sorted_data()
kwargs = {}
if keys:
kwargs['order'] = keys
if kind:
kwargs['kind'] = kind
if keys:
data = self[keys].as_array()
else:
data = self.as_array()
return data.argsort(**kwargs)
def sort(self, keys=None):
'''
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name','firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys)
sort_index = get_index(self, self[keys])
if sort_index is not None:
# avoid inefficient relabelling of sorted index
prev_frozen = sort_index._frozen
sort_index._frozen = True
for col in self.columns.values():
col[:] = col.take(indexes, axis=0)
if sort_index is not None:
# undo index freeze
sort_index._frozen = prev_frozen
# now relabel the sort index appropriately
sort_index.sort()
def reverse(self):
'''
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
for col in self.columns.values():
col[:] = col[::-1]
for index in self.indices:
index.reverse()
@classmethod
def read(cls, *args, **kwargs):
"""
Read and parse a data table and return as a Table.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily reading a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table.read('table.dat', format='ascii')
>>> events = Table.read('events.fits', format='fits')
See http://docs.astropy.org/en/stable/io/unified.html for details.
Parameters
----------
format : str
File format specifier.
*args : tuple, optional
Positional arguments passed through to data reader. If supplied the
first argument is the input filename.
**kwargs : dict, optional
Keyword arguments passed through to data reader.
Returns
-------
out : `Table`
Table corresponding to file contents
Notes
-----
"""
# The hanging Notes section just above is a section placeholder for
# import-time processing that collects available formats into an
# RST table and inserts at the end of the docstring. DO NOT REMOVE.
out = io_registry.read(cls, *args, **kwargs)
# For some readers (e.g., ascii.ecsv), the returned `out` class is not
# guaranteed to be the same as the desired output `cls`. If so,
# try coercing to desired class without copying (io.registry.read
# would normally do a copy). The normal case here is swapping
# Table <=> QTable.
if cls is not out.__class__:
try:
out = cls(out, copy=False)
except Exception:
raise TypeError('could not convert reader output to {0} '
'class.'.format(cls.__name__))
return out
def write(self, *args, **kwargs):
"""Write this Table object out in the specified format.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily writing a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table([[1, 2], [3, 4]], names=('a', 'b'))
>>> dat.write('table.dat', format='ascii')
See http://docs.astropy.org/en/stable/io/unified.html for details.
Parameters
----------
format : str
File format specifier.
serialize_method : str, dict, optional
Serialization method specifier for columns.
*args : tuple, optional
Positional arguments passed through to data writer. If supplied the
first argument is the output filename.
**kwargs : dict, optional
Keyword arguments passed through to data writer.
Notes
-----
"""
serialize_method = kwargs.pop('serialize_method', None)
with serialize_method_as(self, serialize_method):
io_registry.write(self, *args, **kwargs)
def copy(self, copy_data=True):
'''
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
'''
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, '_groups'):
out._groups = groups.TableGroups(out, indices=self._groups._indices,
keys=self._groups._keys)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
if isinstance(other, Table):
other = other.as_array()
if self.masked:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def __ne__(self, other):
return ~self.__eq__(other)
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``
This effectively splits the table into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`TableGroups` which contains a copy of this table but sorted by row
according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `Table`
Key grouping object
Returns
-------
out : `Table`
New table with groups set
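        Examples
        --------
        A minimal example; grouping sorts the output table by the key column::
            >>> t = Table([['b', 'a', 'b'], [2, 1, 3]], names=('key', 'val'))
            >>> tg = t.group_by('key')
            >>> tg['key'].tolist()
            ['a', 'b', 'b']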
"""
return groups.table_group_by(self, keys)
def to_pandas(self):
"""
Return a :class:`pandas.DataFrame` instance
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table contains mixin or multi-dimensional columns
"""
from pandas import DataFrame
if self.has_mixin_columns:
raise ValueError("Cannot convert a table with mixin columns to a pandas DataFrame")
if any(getattr(col, 'ndim', 1) > 1 for col in self.columns.values()):
raise ValueError("Cannot convert a table with multi-dimensional columns to a pandas DataFrame")
out = OrderedDict()
for name, column in self.columns.items():
if isinstance(column, MaskedColumn):
if column.dtype.kind in ['i', 'u']:
out[name] = column.astype(float).filled(np.nan)
elif column.dtype.kind in ['f', 'c']:
out[name] = column.filled(np.nan)
else:
out[name] = column.astype(object).filled(np.nan)
else:
out[name] = column
if out[name].dtype.byteorder not in ('=', '|'):
out[name] = out[name].byteswap().newbyteorder()
return DataFrame(out)
@classmethod
def from_pandas(cls, dataframe):
"""
Create a `Table` from a :class:`pandas.DataFrame` instance
Parameters
----------
dataframe : :class:`pandas.DataFrame`
The pandas :class:`pandas.DataFrame` instance
Returns
-------
table : `Table`
A `Table` (or subclass) instance
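        Examples
        --------
        A minimal sketch (assuming pandas is installed)::
            >>> import pandas as pd  # doctest: +SKIP
            >>> df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})  # doctest: +SKIP
            >>> t = Table.from_pandas(df)  # doctest: +SKIP
            >>> t.colnames  # doctest: +SKIP
            ['a', 'b']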
"""
out = OrderedDict()
for name in dataframe.columns:
column = dataframe[name]
mask = np.array(column.isnull())
data = np.array(column)
if data.dtype.kind == 'O':
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b''
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask)
else:
out[name] = Column(data=data, name=name)
return cls(out)
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`QTable` provides a class for heterogeneous tabular data which can be
easily modified, for instance adding columns or new rows.
The `QTable` class is identical to `Table` except that columns with an
associated ``unit`` attribute are converted to `~astropy.units.Quantity`
objects.
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if (isinstance(col, Column) and getattr(col, 'unit', None) is not None):
# We need to turn the column into a quantity, or a subclass
# identified in the unit (such as u.mag()).
q_cls = getattr(col.unit, '_quantity_class', Quantity)
qcol = q_cls(col.data, col.unit, copy=False)
qcol.info = col.info
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
class NdarrayMixin(np.ndarray):
"""
Mixin column class to allow storage of arbitrary numpy
ndarrays within a Table. This is a subclass of numpy.ndarray
and has the same initialization options as ndarray().
"""
info = ParentDtypeInfo()
def __new__(cls, obj, *args, **kwargs):
self = np.array(obj, *args, **kwargs).view(cls)
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
return self
def __array_finalize__(self, obj):
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle NdarrayMixin objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
|
e1743946769a1746846d8873af496d824cf2b7ae2a042ff3dafda10e3eed44af | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .. import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.table`.
"""
auto_colname = _config.ConfigItem(
'col{0}',
'The template that determines the name of a column if it cannot be '
'determined. Uses new-style (format method) string formatting.',
aliases=['astropy.table.column.auto_colname'])
default_notebook_table_class = _config.ConfigItem(
'table-striped table-bordered table-condensed',
'The table class to be used in Jupyter notebooks when displaying '
        'tables (and not overridden). See <http://getbootstrap.com/css/#tables> '
'for a list of useful bootstrap classes.')
replace_warnings = _config.ConfigItem(
['slice'],
'List of conditions for issuing a warning when replacing a table '
"column using setitem, e.g. t['a'] = value. Allowed options are "
"'always', 'slice', 'refcount', 'attributes'.",
'list',
)
replace_inplace = _config.ConfigItem(
False,
'Always use in-place update of a table column when using setitem, '
"e.g. t['a'] = value. This overrides the default behavior of "
"replacing the column entirely with the new value when possible. "
"This configuration option will be deprecated and then removed in "
"subsequent major releases."
)
conf = Conf()
from .column import Column, MaskedColumn, StringTruncateWarning, ColumnInfo
from .groups import TableGroups, ColumnGroups
from .table import (Table, QTable, TableColumns, Row, TableFormatter,
NdarrayMixin, TableReplaceWarning)
from .operations import join, setdiff, hstack, vstack, unique, TableMergeError
from .bst import BST, FastBST, FastRBT
from .sorted_array import SortedArray
from .soco import SCEngine
from .serialize import SerializedColumn
# Finally import the formats for the read and write method but delay building
# the documentation until all are loaded. (#5275)
from ..io import registry
with registry.delay_doc_updates(Table):
# Import routines that connect readers/writers to astropy.table
from .jsviewer import JSViewer
from ..io.ascii import connect
from ..io.fits import connect
from ..io.misc import connect
from ..io.votable import connect
|
ead1fa28e1da840513d6595efe967a000db787524f1f079dfc944021abefe118 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import weakref
import re
from copy import deepcopy
import numpy as np
from numpy import ma
# Remove this when Numpy no longer emits this warning and that Numpy version
# becomes the minimum required version for Astropy.
# https://github.com/astropy/astropy/issues/6285
try:
from numpy.ma.core import MaskedArrayFutureWarning
except ImportError:
# For Numpy versions that do not raise this warning.
MaskedArrayFutureWarning = None
from ..units import Unit, Quantity
from ..utils.console import color_print
from ..utils.metadata import MetaData
from ..utils.data_info import BaseColumnInfo, dtype_info_name
from ..utils.misc import dtype_bytes_or_chars
from . import groups
from . import pprint
from .np_utils import fix_column_name
# These "shims" provide __getitem__ implementations for Column and MaskedColumn
from ._column_mixins import _ColumnGetitemShim, _MaskedColumnGetitemShim
# Create a generic TableFormatter object for use by bare columns with no
# parent table.
FORMATTER = pprint.TableFormatter()
class StringTruncateWarning(UserWarning):
"""
Warning class for when a string column is assigned a value
that gets truncated because the base (numpy) string length
is too short.
This does not inherit from AstropyWarning because we want to use
stacklevel=2 to show the user where the issue occurred in their code.
"""
pass
# Always emit this warning, not just the first instance
warnings.simplefilter('always', StringTruncateWarning)
def _auto_names(n_cols):
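    # With the default ``auto_colname`` template 'col{0}', _auto_names(2)
    # returns ['col0', 'col1'].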
from . import conf
return [str(conf.auto_colname).format(i) for i in range(n_cols)]
# list of one and two-dimensional comparison functions, which sometimes return
# a Column class and sometimes a plain array. Used in __array_wrap__ to ensure
# they only return plain (masked) arrays (see #1446 and #1685)
_comparison_functions = set(
[np.greater, np.greater_equal, np.less, np.less_equal,
np.not_equal, np.equal,
np.isfinite, np.isinf, np.isnan, np.sign, np.signbit])
def col_copy(col, copy_indices=True):
"""
Mixin-safe version of Column.copy() (with copy_data=True).
Parameters
----------
col : Column or mixin column
Input column
copy_indices : bool
Copy the column ``indices`` attribute
Returns
-------
col : Copy of input column
"""
if isinstance(col, BaseColumn):
return col.copy()
# The new column should have None for the parent_table ref. If the
# original parent_table weakref there at the point of copying then it
# generates an infinite recursion. Instead temporarily remove the weakref
# on the original column and restore after the copy in an exception-safe
# manner.
parent_table = col.info.parent_table
indices = col.info.indices
col.info.parent_table = None
col.info.indices = []
try:
newcol = col.copy() if hasattr(col, 'copy') else deepcopy(col)
newcol.info = col.info
newcol.info.indices = deepcopy(indices or []) if copy_indices else []
for index in newcol.info.indices:
index.replace_col(col, newcol)
finally:
col.info.parent_table = parent_table
col.info.indices = indices
return newcol
class FalseArray(np.ndarray):
"""
Boolean mask array that is always False.
This is used to create a stub ``mask`` property which is a boolean array of
``False`` used by default for mixin columns and corresponding to the mixin
column data shape. The ``mask`` looks like a normal numpy array but an
exception will be raised if ``True`` is assigned to any element. The
consequences of the limitation are most obvious in the high-level table
operations.
Parameters
----------
shape : tuple
Data shape
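    Examples
    --------
    A minimal example; any attempt to set a True element raises::
        >>> mask = FalseArray((2,))
        >>> bool(mask.any())
        False
        >>> mask[0] = True
        Traceback (most recent call last):
        ...
        ValueError: Cannot set any element of FalseArray class to True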
"""
def __new__(cls, shape):
obj = np.zeros(shape, dtype=bool).view(cls)
return obj
def __setitem__(self, item, val):
val = np.asarray(val)
if np.any(val):
raise ValueError('Cannot set any element of {0} class to True'
.format(self.__class__.__name__))
class ColumnInfo(BaseColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information.
"""
attrs_from_parent = BaseColumnInfo.attr_names
_supports_indexing = True
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Column instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Column (or subclass)
New instance of this class consistent with ``cols``
"""
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'unit', 'format', 'description'))
return self._parent_cls(length=length, **attrs)
class BaseColumn(_ColumnGetitemShim, np.ndarray):
meta = MetaData()
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if data is None:
dtype = (np.dtype(dtype).str, shape)
self_data = np.zeros(length, dtype=dtype)
elif isinstance(data, BaseColumn) and hasattr(data, '_name'):
# When unpickling a MaskedColumn, ``data`` will be a bare
# BaseColumn with none of the expected attributes. In this case
# do NOT execute this block which initializes from ``data``
# attributes.
self_data = np.array(data.data, dtype=dtype, copy=copy)
if description is None:
description = data.description
if unit is None:
                unit = data.unit
if format is None:
format = data.format
if meta is None:
meta = deepcopy(data.meta)
if name is None:
name = data.name
elif isinstance(data, Quantity):
if unit is None:
self_data = np.array(data, dtype=dtype, copy=copy)
unit = data.unit
else:
self_data = np.array(data.to(unit), dtype=dtype, copy=copy)
if description is None:
description = data.info.description
if format is None:
format = data.info.format
if meta is None:
meta = deepcopy(data.info.meta)
else:
if np.dtype(dtype).char == 'S':
data = cls._encode_str(data)
self_data = np.array(data, dtype=dtype, copy=copy)
self = self_data.view(cls)
self._name = fix_column_name(name)
self._parent_table = None
self.unit = unit
self._format = format
self.description = description
self.meta = meta
        self.indices = (deepcopy(getattr(data, 'indices', []))
                        if copy_indices else [])
for index in self.indices:
index.replace_col(data, self)
return self
@property
def data(self):
return self.view(np.ndarray)
@property
def parent_table(self):
# Note: It seems there are some cases where _parent_table is not set,
        # such as after restoring from a pickled Column. Perhaps that should be
# fixed, but this is also okay for now.
if getattr(self, '_parent_table', None) is None:
return None
else:
return self._parent_table()
@parent_table.setter
def parent_table(self, table):
if table is None:
self._parent_table = None
else:
self._parent_table = weakref.ref(table)
info = ColumnInfo()
def copy(self, order='C', data=None, copy_data=True):
"""
Return a copy of the current instance.
If ``data`` is supplied then a view (reference) of ``data`` is used,
and ``copy_data`` is ignored.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible. (Note that this function and :func:numpy.copy are very
similar, but have different default values for their order=
arguments.) Default is 'C'.
data : array, optional
If supplied then use a view of ``data`` instead of the instance
data. This allows copying the instance attributes and meta.
copy_data : bool, optional
Make a copy of the internal numpy array instead of using a
reference. Default is True.
Returns
-------
col : Column or MaskedColumn
Copy of the current column (same type as original)
"""
if data is None:
data = self.data
if copy_data:
data = data.copy(order)
out = data.view(self.__class__)
out.__array_finalize__(self)
# for MaskedColumn, MaskedArray.__array_finalize__ also copies mask
# from self, which is not the idea here, so undo
if isinstance(self, MaskedColumn):
out._mask = data._mask
self._copy_groups(out)
return out
def __setstate__(self, state):
"""
Restore the internal state of the Column/MaskedColumn for pickling
purposes. This requires that the last element of ``state`` is a
5-tuple that has Column-specific state values.
"""
# Get the Column attributes
names = ('_name', '_unit', '_format', 'description', 'meta', 'indices')
attrs = {name: val for name, val in zip(names, state[-1])}
state = state[:-1]
# Using super().__setstate__(state) gives
# "TypeError 'int' object is not iterable", raised in
# astropy.table._column_mixins._ColumnGetitemShim.__setstate_cython__()
# Previously, it seems to have given an infinite recursion.
# Hence, manually call the right super class to actually set up
# the array object.
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
super_class.__setstate__(self, state)
# Set the Column attributes
for name, val in attrs.items():
setattr(self, name, val)
self._parent_table = None
def __reduce__(self):
"""
Return a 3-tuple for pickling a Column. Use the super-class
functionality but then add in a 5-tuple of Column-specific values
that get used in __setstate__.
"""
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self)
# Define Column-specific attrs and meta that gets added to state.
column_state = (self.name, self.unit, self.format, self.description,
self.meta, self.indices)
state = state + (column_state,)
return reconstruct_func, reconstruct_func_args, state
def __array_finalize__(self, obj):
# Obj will be None for a direct call to the Column() creator
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
self.parent_table = None
if not hasattr(self, 'indices'): # may have been copied in __new__
self.indices = []
self._copy_attrs(obj)
def __array_wrap__(self, out_arr, context=None):
"""
__array_wrap__ is called at the end of every ufunc.
Normally, we want a Column object back and do not have to do anything
special. But there are two exceptions:
1) If the output shape is different (e.g. for reduction ufuncs
like sum() or mean()), a Column still linking to a parent_table
makes little sense, so we return the output viewed as the
column content (ndarray or MaskedArray).
For this case, we use "[()]" to select everything, and to ensure we
convert a zero rank array to a scalar. (For some reason np.sum()
returns a zero rank scalar array while np.mean() returns a scalar,
so the [()] is needed for this case.)
2) When the output is created by any function that returns a boolean
we also want to consistently return an array rather than a column
(see #1446 and #1685)
"""
out_arr = super().__array_wrap__(out_arr, context)
if (self.shape != out_arr.shape or
(isinstance(out_arr, BaseColumn) and
(context is not None and context[0] in _comparison_functions))):
return out_arr.data[()]
else:
return out_arr
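# Example (illustrative, not executed here): the two exceptions described in
# the docstring above mean that reductions and comparisons on a Column come
# back as plain numpy objects rather than Columns.
#
#     import numpy as np
#     from astropy.table import Column
#     c = Column([1, 2, 3], name='a')
#     total = np.sum(c)           # plain numpy scalar, not a Column
#     eq = (c == 2)               # plain bool ndarray, not a Column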
@property
def name(self):
"""
The name of this column.
"""
return self._name
@name.setter
def name(self, val):
val = fix_column_name(val)
if self.parent_table is not None:
table = self.parent_table
table.columns._rename_column(self.name, val)
self._name = val
@property
def format(self):
"""
Format string for displaying values in this column.
"""
return self._format
@format.setter
def format(self, format_string):
prev_format = getattr(self, '_format', None)
self._format = format_string # set new format string
try:
# test whether the new format string works on a sample of the data
self.pformat(max_lines=1)
except Exception as err:
# revert to restore previous format if there was one
self._format = prev_format
raise ValueError(
"Invalid format for column '{0}': could not display "
"values in this column using this format ({1})".format(
self.name, err.args[0]))
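# Example (illustrative, not executed here): an invalid format string is
# rejected and the previous format is restored, per the setter above.
#
#     from astropy.table import Column
#     c = Column([1.2345, 2.3456], name='a')
#     c.format = '%6.2f'          # accepted; used by pformat()/pprint()
#     try:
#         c.format = '%q'         # no such conversion -> ValueError
#     except ValueError:
#         pass
#     assert c.format == '%6.2f'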
@property
def descr(self):
"""Array-interface compliant full description of the column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
return (self.name, self.dtype.str, self.shape[1:])
def iter_str_vals(self):
"""
Return an iterator that yields the string-formatted values of this
column.
Returns
-------
str_vals : iterator
Column values formatted as strings
"""
# Iterate over formatted values with no max number of lines, no column
# name, no unit, and ignoring the returned header info in outs.
_pformat_col_iter = self._formatter._pformat_col_iter
for str_val in _pformat_col_iter(self, -1, show_name=False, show_unit=False,
show_dtype=False, outs={}):
yield str_val
def attrs_equal(self, col):
"""Compare the column attributes of ``col`` to this object.
The comparison attributes are: ``name``, ``unit``, ``dtype``,
``format``, ``description``, and ``meta``.
Parameters
----------
col : Column
Comparison column
Returns
-------
equal : boolean
True if all attributes are equal
"""
if not isinstance(col, BaseColumn):
raise ValueError('Comparison `col` must be a Column or '
'MaskedColumn object')
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
equal = all(getattr(self, x) == getattr(col, x) for x in attrs)
return equal
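# Example (illustrative, not executed here): only the listed attributes are
# compared, not the data values.
#
#     from astropy.table import Column
#     c1 = Column([1, 2], name='a', unit='m')
#     c2 = Column([3, 4], name='a', unit='m')
#     c3 = Column([3, 4], name='b', unit='m')
#     assert c1.attrs_equal(c2)       # same attrs, different data
#     assert not c1.attrs_equal(c3)   # names differ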
@property
def _formatter(self):
return FORMATTER if (self.parent_table is None) else self.parent_table.formatter
def pformat(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False,
html=False):
"""Return a list of formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is False.
html : bool
Format the output as an HTML table. Default is False.
Returns
-------
lines : list
List of lines with header and formatted column values
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype,
html=html)
return lines
def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False):
"""Print a formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum number of values in output
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is False.
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype)
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
def more(self, max_lines=None, show_name=True, show_unit=False):
"""Interactively browse column with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
"""
_more_tabcol = self._formatter._more_tabcol
_more_tabcol(self, max_lines=max_lines, show_name=show_name,
show_unit=show_unit)
@property
def unit(self):
"""
The unit associated with this column. May be a string or an
`astropy.units.UnitBase` instance.
Setting the ``unit`` property does not change the values of the
data. To perform a unit conversion, use ``convert_unit_to``.
"""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
else:
self._unit = Unit(unit, parse_strict='silent')
@unit.deleter
def unit(self):
self._unit = None
def convert_unit_to(self, new_unit, equivalencies=[]):
"""
Converts the values of the column in-place from the current
unit to the given unit.
To change the unit associated with this column without
actually changing the data values, simply set the ``unit``
property.
Parameters
----------
new_unit : str or `astropy.units.UnitBase` instance
The unit to convert to.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
Raises
------
astropy.units.UnitsError
If units are inconsistent
"""
if self.unit is None:
raise ValueError("No unit set on column")
self.data[:] = self.unit.to(
new_unit, self.data, equivalencies=equivalencies)
self.unit = new_unit
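# Example (illustrative, not executed here): in-place unit conversion.
#
#     from astropy.table import Column
#     c = Column([1000., 2500.], name='dist', unit='m')
#     c.convert_unit_to('km')
#     # c.data is now [1., 2.5] and c.unit is 'km'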
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.ColumnGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this column by the specified ``keys``.
This effectively splits the column into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`Column` or `MaskedColumn` which contains a copy of this column but
sorted by row according to ``keys``.
The ``keys`` input to ``group_by`` must be a numpy array with the
same length as this column.
Parameters
----------
keys : numpy array
Key grouping object
Returns
-------
out : Column
New column with groups attribute set accordingly
"""
return groups.column_group_by(self, keys)
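# Example (illustrative, not executed here): grouping by a key array returns
# a sorted copy with its ``groups`` attribute set.
#
#     import numpy as np
#     from astropy.table import Column
#     c = Column([1, 2, 3, 4], name='a')
#     cg = c.group_by(np.array(['x', 'y', 'x', 'y']))
#     # cg is sorted by key; cg.groups.indices marks the group boundaries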
def _copy_groups(self, out):
"""
Copy current groups into a copy of self ``out``
"""
if self.parent_table:
if hasattr(self.parent_table, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self.parent_table._groups._indices)
elif hasattr(self, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self._groups._indices)
# Strip off the BaseColumn-ness for repr and str so that
# MaskedColumn.data __repr__ does not include masked_BaseColumn(data =
# [1 2], ...).
def __repr__(self):
return np.asarray(self).__repr__()
@property
def quantity(self):
"""
A view of this table column as a `~astropy.units.Quantity` object with
units given by the Column's `unit` parameter.
"""
# the Quantity initializer is used here because it correctly fails
# if the column's values are non-numeric (like strings), while .view
# will happily return a quantity with gibberish for numerical values
return Quantity(self, copy=False, dtype=self.dtype, order='A')
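# Example (illustrative, not executed here): ``quantity`` is a view, so
# in-place changes to it are reflected in the column.
#
#     import astropy.units as u
#     from astropy.table import Column
#     c = Column([1., 2.], name='x', unit='m')
#     q = c.quantity              # Quantity view sharing the column data
#     q[0] = 10 * u.m             # c[0] is now 10.0 as well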
def to(self, unit, equivalencies=[], **kwargs):
"""
Converts this table column to a `~astropy.units.Quantity` object with
the requested units.
Parameters
----------
unit : `~astropy.units.Unit` or str
The unit to convert to (i.e., a valid argument to the
:meth:`astropy.units.Quantity.to` method).
equivalencies : list of equivalence pairs, optional
Equivalencies to use for this conversion. See
:meth:`astropy.units.Quantity.to` for more details.
Returns
-------
quantity : `~astropy.units.Quantity`
A quantity object with the contents of this column in the units
``unit``.
"""
return self.quantity.to(unit, equivalencies)
def _copy_attrs(self, obj):
"""
Copy key column attributes from ``obj`` to self
"""
for attr in ('name', 'unit', '_format', 'description'):
val = getattr(obj, attr, None)
setattr(self, attr, val)
self.meta = deepcopy(getattr(obj, 'meta', {}))
@staticmethod
def _encode_str(value):
"""
Encode anything that is unicode-ish as utf-8. This method is only
called for Py3+.
"""
if isinstance(value, str):
value = value.encode('utf-8')
elif isinstance(value, bytes) or value is np.ma.masked:
pass
else:
arr = np.asarray(value)
if arr.dtype.char == 'U':
arr = np.char.encode(arr, encoding='utf-8')
if isinstance(value, np.ma.MaskedArray):
arr = np.ma.array(arr, mask=value.mask, copy=False)
value = arr
return value
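# Example (illustrative, not executed here): unicode-ish inputs are coerced
# to utf-8 bytes for use with bytestring ('S') columns.
#
#     import numpy as np
#     BaseColumn._encode_str('abc')                   # -> b'abc'
#     BaseColumn._encode_str(np.array(['a', 'bc']))   # -> array([b'a', b'bc'])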
class Column(BaseColumn):
"""Define a data column for use in a Table object.
Parameters
----------
data : list, ndarray or None
Column data values
name : str
Column name and key for reference within Table
dtype : numpy.dtype compatible value
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str or None or function or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A Column can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = Column(data=[1, 2], name='name') # shape=(2,)
col = Column(data=[[1, 2], [3, 4]], name='name') # shape=(2, 2)
col = Column(data=[1, 2], name='name', dtype=float)
col = Column(data=np.array([1, 2]), name='name')
col = Column(data=['hello', 'world'], name='name')
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = Column(name='name', length=5)
col = Column(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
"""
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if isinstance(data, MaskedColumn) and np.any(data.mask):
raise TypeError("Cannot convert a MaskedColumn with masked value to a Column")
self = super().__new__(
cls, data=data, name=name, dtype=dtype, shape=shape, length=length,
description=description, unit=unit, format=format, meta=meta,
copy=copy, copy_indices=copy_indices)
return self
def __setattr__(self, item, value):
if not isinstance(self, MaskedColumn) and item == "mask":
raise AttributeError("cannot set mask value to a column in non-masked Table")
super().__setattr__(item, value)
if item == 'unit' and issubclass(self.dtype.type, np.number):
try:
converted = self.parent_table._convert_col_for_table(self)
except AttributeError: # Either no parent table or parent table is None
pass
else:
if converted is not self:
self.parent_table.replace_column(self.name, converted)
def _base_repr_(self, html=False):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return repr(self.item())
descr_vals = [self.__class__.__name__]
unit = None if self.unit is None else str(self.unit)
shape = None if self.ndim <= 1 else self.shape[1:]
for attr, val in (('name', self.name),
('dtype', dtype_info_name(self.dtype)),
('shape', shape),
('unit', unit),
('format', self.format),
('description', self.description),
('length', len(self))):
if val is not None:
descr_vals.append('{0}={1!r}'.format(attr, val))
descr = '<' + ' '.join(descr_vals) + '>\n'
if html:
from ..utils.xml.writer import xml_escape
descr = xml_escape(descr)
data_lines, outs = self._formatter._pformat_col(
self, show_name=False, show_unit=False, show_length=False, html=html)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return str(self.item())
lines, outs = self._formatter._pformat_col(self)
return '\n'.join(lines)
def __bytes__(self):
return str(self).encode('utf-8')
def _check_string_truncate(self, value):
"""
Emit a warning if any elements of ``value`` will be truncated when
``value`` is assigned to self.
"""
# Convert input ``value`` to the string dtype of this column and
# find the length of the longest string in the array.
value = np.asanyarray(value, dtype=self.dtype.type)
if value.size == 0:
return
value_str_len = np.char.str_len(value).max()
# Parse the array-protocol typestring (e.g. '|U15') of self.dtype which
# has the character repeat count on the right side.
self_str_len = dtype_bytes_or_chars(self.dtype)
if value_str_len > self_str_len:
warnings.warn('truncated right side string(s) longer than {} '
'character(s) during assignment'
.format(self_str_len),
StringTruncateWarning,
stacklevel=3)
def __setitem__(self, index, value):
if self.dtype.char == 'S':
value = self._encode_str(value)
# Issue warning for string assignment that truncates ``value``
if issubclass(self.dtype.type, np.character):
self._check_string_truncate(value)
# update indices
self.info.adjust_indices(index, value, len(self))
# Set items using a view of the underlying data, as it gives an
# order-of-magnitude speed-up. [#2994]
self.data[index] = value
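# Example (illustrative, not executed here): assigning a string longer than
# the column width triggers a StringTruncateWarning, per the check above.
#
#     from astropy.table import Column
#     c = Column(['ab', 'cd'], name='s')    # 2-character string dtype
#     c[0] = 'abcde'                        # warns; stores truncated 'ab'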
def _make_compare(oper):
"""
Make comparison methods which encode the ``other`` object to utf-8
in the case of a bytestring dtype for Py3+.
"""
swapped_oper = {'__eq__': '__eq__',
'__ne__': '__ne__',
'__gt__': '__lt__',
'__lt__': '__gt__',
'__ge__': '__le__',
'__le__': '__ge__'}[oper]
def _compare(self, other):
op = oper # copy enclosed ref to allow swap below
# Special case to work around #6838. Other combinations work OK,
# see tests.test_column.test_unicode_sandwich_compare(). In this
# case just swap self and other.
#
# This is related to an issue in numpy that was addressed in np 1.13.
# However that fix does not make this problem go away, but maybe
# future numpy versions will do so. NUMPY_LT_1_13 to get the
# attention of future maintainers to check (by deleting or versioning
# the if block below). See #6899 discussion.
if (isinstance(self, MaskedColumn) and self.dtype.kind == 'U' and
isinstance(other, MaskedColumn) and other.dtype.kind == 'S'):
self, other = other, self
op = swapped_oper
if self.dtype.char == 'S':
other = self._encode_str(other)
return getattr(self.data, op)(other)
return _compare
__eq__ = _make_compare('__eq__')
__ne__ = _make_compare('__ne__')
__gt__ = _make_compare('__gt__')
__lt__ = _make_compare('__lt__')
__ge__ = _make_compare('__ge__')
__le__ = _make_compare('__le__')
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.table.Column` object.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array_like
Value(s) to insert. If the type of ``values`` is different
from that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.Column`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new column is returned.
"""
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
data = np.insert(self, obj, None, axis=axis)
data[obj] = values
else:
# Explicitly convert to the dtype of this column. Needed because numpy
# 1.7 enforces safe casting by default; this isn't the case for 1.6 or 1.8+.
values = np.asarray(values, dtype=self.dtype)
data = np.insert(self, obj, values, axis=axis)
out = data.view(self.__class__)
out.__array_finalize__(self)
return out
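# Example (illustrative, not executed here): ``insert`` returns a new column
# and leaves the original untouched.
#
#     from astropy.table import Column
#     c = Column([1, 2, 4], name='a')
#     c2 = c.insert(2, 3)
#     # c2 holds [1, 2, 3, 4]; c still holds [1, 2, 4]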
# We do this to make the methods show up in the API docs
name = BaseColumn.name
unit = BaseColumn.unit
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
quantity = BaseColumn.quantity
to = BaseColumn.to
class MaskedColumnInfo(ColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information. In this case
it just adds the ``mask_val`` attribute.
"""
# Add `serialize_method` attribute to the attrs that MaskedColumnInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. See also code below.
attr_names = ColumnInfo.attr_names | {'serialize_method'}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = 'data'
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {'fits': 'null_value',
'ecsv': 'null_value',
'hdf5': 'data_mask',
None: 'null_value'}
def _represent_as_dict(self):
out = super()._represent_as_dict()
col = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == 'data_mask':
if np.any(col.mask):
# Note that adding to _represent_as_dict_attrs triggers later code which
# will add this to the '__serialized_columns__' meta YAML dict.
out['data'] = col.data.data
out['mask'] = col.mask
self._represent_as_dict_attrs += ('data', 'mask',)
elif method == 'null_value':
pass
else:
raise ValueError('serialize method must be either "data_mask" or "null_value"')
return out
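# Example (hypothetical usage sketch; assumes ``t`` is a Table containing a
# MaskedColumn named 'flux', and that the writer honors per-column
# serialize_method settings): request explicit data+mask serialization for
# the 'ecsv' context instead of the default null-value encoding.
#
#     t['flux'].info.serialize_method['ecsv'] = 'data_mask'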
class MaskedColumn(Column, _MaskedColumnGetitemShim, ma.MaskedArray):
"""Define a masked data column for use in a Table object.
Parameters
----------
data : list, ndarray or None
Column data values
name : str
Column name and key for reference within Table
mask : list, ndarray or None
Boolean mask for which True indicates missing or invalid data
fill_value : float, int, str or None
Value used when filling masked column elements
dtype : numpy.dtype compatible value
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str or None or function or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A MaskedColumn is similar to a Column except that it includes ``mask`` and
``fill_value`` attributes. It can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = MaskedColumn(data=[1, 2], name='name')
col = MaskedColumn(data=[1, 2], name='name', mask=[True, False])
col = MaskedColumn(data=[1, 2], name='name', dtype=float, fill_value=99)
The ``mask`` argument will be cast as a boolean array and specifies
which elements are considered to be missing or invalid.
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``. When ``data`` is provided then the ``shape``
and ``length`` arguments are ignored.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = MaskedColumn(name='name', length=5)
col = MaskedColumn(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
"""
info = MaskedColumnInfo()
def __new__(cls, data=None, name=None, mask=None, fill_value=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if mask is None:
# Issue #7399 with fix #7422. Passing mask=None to ma.MaskedArray
# is extremely slow (~3 seconds for 1e7 elements), while mask=False
# gets quickly broadcast to the expected bool array of False.
mask = getattr(data, 'mask', False)
if mask is not False:
mask = np.array(mask, copy=copy)
elif mask is np.ma.nomask:
# Force the creation of a full mask array as nomask is tricky to
# use and will fail in an unexpected manner when setting a value
# to the mask.
mask = False
else:
mask = deepcopy(mask)
# Create self using MaskedArray as a wrapper class, following the example of
# class MSubArray in
# https://github.com/numpy/numpy/blob/maintenance/1.8.x/numpy/ma/tests/test_subclassing.py
# This pattern makes it so that __array_finalize__ is called as expected (e.g. #1471 and
# https://github.com/astropy/astropy/commit/ff6039e8)
# First just pass through all args and kwargs to BaseColumn, then wrap that object
# with MaskedArray.
self_data = BaseColumn(data, dtype=dtype, shape=shape, length=length, name=name,
unit=unit, format=format, description=description,
meta=meta, copy=copy, copy_indices=copy_indices)
self = ma.MaskedArray.__new__(cls, data=self_data, mask=mask)
# Note: do not set fill_value in the MaskedArray constructor because this does not
# go through the fill_value workarounds.
if fill_value is None and getattr(data, 'fill_value', None) is not None:
# Coerce the fill_value to the correct type since `data` may be a
# different dtype than self.
fill_value = self.dtype.type(data.fill_value)
self.fill_value = fill_value
self.parent_table = None
# needs to be done here since self doesn't come from BaseColumn.__new__
for index in self.indices:
index.replace_col(self_data, self)
return self
@property
def fill_value(self):
return self.get_fill_value() # defer to native ma.MaskedArray method
@fill_value.setter
def fill_value(self, val):
"""Set fill value both in the masked column view and in the parent table
if it exists. Setting one or the other alone doesn't work."""
# another ma bug workaround: If the value of fill_value for a string array is
# requested but not yet set then it gets created as 'N/A'. From this point onward
# any new fill_values are truncated to 3 characters. Note that this does not
# occur if the masked array is a structured array (as in the previous block that
# deals with the parent table).
#
# >>> x = ma.array(['xxxx'])
# >>> x.fill_value # fill_value now gets represented as an 'S3' array
# 'N/A'
# >>> x.fill_value='yyyy'
# >>> x.fill_value
# 'yyy'
#
# To handle this we are forced to reset a private variable first:
self._fill_value = None
self.set_fill_value(val) # defer to native ma.MaskedArray method
@property
def data(self):
out = self.view(ma.MaskedArray)
# The following is necessary because of a bug in Numpy, which was
# fixed in numpy/numpy#2703. The fix should be included in Numpy 1.8.0.
out.fill_value = self.fill_value
return out
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (`None` by default). If
`None`, the ``fill_value`` attribute of the array is used
instead.
Returns
-------
filled_column : Column
A copy of ``self`` with masked entries replaced by `fill_value`
(be it the function argument or the attribute of ``self``).
"""
if fill_value is None:
fill_value = self.fill_value
data = super().filled(fill_value)
# Use parent table definition of Column if available
column_cls = self.parent_table.Column if (self.parent_table is not None) else Column
out = column_cls(name=self.name, data=data, unit=self.unit,
format=self.format, description=self.description,
meta=deepcopy(self.meta))
return out
def insert(self, obj, values, mask=None, axis=0):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.table.MaskedColumn` object.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array_like
Value(s) to insert. If the type of ``values`` is different
from that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
mask : boolean array_like
Mask value(s) to insert. If not supplied then False is used.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.MaskedColumn`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new masked column is returned.
"""
self_ma = self.data # self viewed as MaskedArray
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
new_data = np.insert(self_ma.data, obj, None, axis=axis)
new_data[obj] = values
else:
# Explicitly convert to the dtype of this column. Needed because numpy
# 1.7 enforces safe casting by default; this isn't the case for 1.6 or 1.8+.
values = np.asarray(values, dtype=self.dtype)
new_data = np.insert(self_ma.data, obj, values, axis=axis)
if mask is None:
if self.dtype.kind == 'O':
mask = False
else:
mask = np.zeros(values.shape, dtype=bool)
new_mask = np.insert(self_ma.mask, obj, mask, axis=axis)
new_ma = np.ma.array(new_data, mask=new_mask, copy=False)
out = new_ma.view(self.__class__)
out.parent_table = None
out.indices = []
out._copy_attrs(self)
out.fill_value = self.fill_value
return out
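# Example (illustrative, not executed here): inserting a masked value.
#
#     from astropy.table import MaskedColumn
#     c = MaskedColumn([1, 2, 4], name='a', mask=[False, False, True])
#     c2 = c.insert(2, 3, mask=True)
#     # c2 holds [1, 2, 3, 4] with mask [False, False, True, True]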
def _copy_attrs_slice(self, out):
# Fixes issue #3023: when calling getitem with a MaskedArray subclass
# the original object attributes are not copied.
if out.__class__ is self.__class__:
out.parent_table = None
# we need this because __getitem__ does a shallow copy of indices
if out.indices is self.indices:
out.indices = []
out._copy_attrs(self)
return out
def __setitem__(self, index, value):
# Issue warning for string assignment that truncates ``value``
if self.dtype.char == 'S':
value = self._encode_str(value)
if issubclass(self.dtype.type, np.character):
# Account for a bug in np.ma.MaskedArray setitem.
# https://github.com/numpy/numpy/issues/8624
value = np.ma.asanyarray(value, dtype=self.dtype.type)
# Check for string truncation after filling masked items with
# empty (zero-length) string. Note that filled() does not make
# a copy if there are no masked items.
self._check_string_truncate(value.filled(''))
# update indices
self.info.adjust_indices(index, value, len(self))
# Remove this when Numpy no longer emits this warning and that
# Numpy version becomes the minimum required version for Astropy.
# https://github.com/astropy/astropy/issues/6285
if MaskedArrayFutureWarning is None:
ma.MaskedArray.__setitem__(self, index, value)
else:
with warnings.catch_warnings():
warnings.simplefilter('ignore', MaskedArrayFutureWarning)
ma.MaskedArray.__setitem__(self, index, value)
# We do this to make the methods show up in the API docs
name = BaseColumn.name
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
|
d2aa94db0c1108e2ece5bd6ecb2a86c2dc5c5fb60db32f2b526a638a73ab8ce9 | from importlib import import_module
import re
from copy import deepcopy
from collections import OrderedDict
from ..utils.data_info import MixinInfo
from .column import Column
from .table import Table, QTable, has_info_class
from ..units.quantity import QuantityInfo
__construct_mixin_classes = ('astropy.time.core.Time',
'astropy.time.core.TimeDelta',
'astropy.units.quantity.Quantity',
'astropy.coordinates.angles.Latitude',
'astropy.coordinates.angles.Longitude',
'astropy.coordinates.angles.Angle',
'astropy.coordinates.distances.Distance',
'astropy.coordinates.earth.EarthLocation',
'astropy.coordinates.sky_coordinate.SkyCoord',
'astropy.table.table.NdarrayMixin',
'astropy.table.column.MaskedColumn')
class SerializedColumn(dict):
"""
Subclass of dict that is used in the representation to contain the name
(and possibly other info) for a mixin attribute (either primary data or an
array-like attribute) that is serialized as a column in the table.
Normally contains the single key ``name`` with the name of the column in the
table.
"""
pass
def _represent_mixin_as_column(col, name, new_cols, mixin_cols,
exclude_classes=()):
"""Carry out processing needed to serialize ``col`` in an output table
consisting purely of plain ``Column`` or ``MaskedColumn`` columns. This
relies on the object to determine if any transformation is required and may
depend on the ``serialize_method`` and ``serialize_context`` context
variables. For instance a ``MaskedColumn`` may be stored directly to
FITS, but can also be serialized as separate data and mask columns.
This function builds up a list of plain columns in the ``new_cols`` arg (which
is passed as a persistent list). This includes both plain columns from the
original table and plain columns that represent data from serialized columns
(e.g. ``jd1`` and ``jd2`` arrays from a ``Time`` column).
For serialized columns the ``mixin_cols`` dict is updated with required
attributes and information to subsequently reconstruct the table.
Table mixin columns are always serialized and get represented by one
or more data columns. In earlier versions of the code *only* mixin
columns were serialized, hence the use within this code of "mixin"
to imply serialization. Starting with version 3.1, the non-mixin
``MaskedColumn`` can also be serialized.
"""
obj_attrs = col.info._represent_as_dict()
ordered_keys = col.info._represent_as_dict_attrs
# If serialization is not required (see function docstring above)
# or explicitly specified as excluded, then treat as a normal column.
if not obj_attrs or col.__class__ in exclude_classes:
new_cols.append(col)
return
# Subtlety here is handling mixin info attributes. The basic list of such
# attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'.
# - name: handled directly [DON'T store]
# - unit: DON'T store if this is a parent attribute
# - dtype: captured in plain Column if relevant [DON'T store]
# - format: possibly irrelevant but settable post-object creation [DO store]
# - description: DO store
# - meta: DO store
info = {}
for attr, nontrivial, xform in (('unit', lambda x: x is not None and x != '', str),
('format', lambda x: x is not None, None),
('description', lambda x: x is not None, None),
('meta', lambda x: x, None)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = xform(col_attr) if xform else col_attr
data_attrs = [key for key in ordered_keys if key in obj_attrs and
getattr(obj_attrs[key], 'shape', ())[:1] == col.shape[:1]]
for data_attr in data_attrs:
data = obj_attrs[data_attr]
# New column name combines the old name and attribute
# (e.g. skycoord.ra, skycoord.dec) unless it is the primary data
# attribute for the column (e.g. value for Quantity or data
# for MaskedColumn)
if data_attr == col.info._represent_as_dict_primary_data:
new_name = name
else:
new_name = name + '.' + data_attr
if not has_info_class(data, MixinInfo):
new_cols.append(Column(data, name=new_name, **info))
obj_attrs[data_attr] = SerializedColumn({'name': new_name})
else:
# recurse. This will define obj_attrs[new_name].
_represent_mixin_as_column(data, new_name, new_cols, obj_attrs)
obj_attrs[data_attr] = SerializedColumn(obj_attrs.pop(new_name))
# Strip out from info any attributes defined by the parent
for attr in col.info.attrs_from_parent:
if attr in info:
del info[attr]
if info:
obj_attrs['__info__'] = info
# Store the fully qualified class name
obj_attrs['__class__'] = col.__module__ + '.' + col.__class__.__name__
mixin_cols[name] = obj_attrs
def _represent_mixins_as_columns(tbl, exclude_classes=()):
"""
Convert any mixin columns to plain Column or MaskedColumn and
return a new table. Exclude any mixin columns in ``exclude_classes``,
which must be a tuple of classes.
"""
# Dict of metadata for serializing each column, keyed by column name.
# Gets filled in place by _represent_mixin_as_column().
mixin_cols = {}
# List of columns for the output table. For plain Column objects
# this will just be the original column object.
new_cols = []
# Go through table columns and represent each column as one or more
# plain Column objects (in new_cols) + metadata (in mixin_cols).
for col in tbl.itercols():
_represent_mixin_as_column(col, col.info.name, new_cols, mixin_cols,
exclude_classes=exclude_classes)
# If no metadata was created then just return the original table.
if not mixin_cols:
return tbl
meta = deepcopy(tbl.meta)
meta['__serialized_columns__'] = mixin_cols
out = Table(new_cols, meta=meta, copy=False)
return out
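# Example (illustrative sketch, not executed here): a QTable with a Quantity
# column becomes a plain Table plus reconstruction metadata.
#
#     import astropy.units as u
#     from astropy.table import QTable
#     t = QTable({'v': [1., 2.] * u.km / u.s})
#     plain = _represent_mixins_as_columns(t)
#     # plain contains only plain Column objects; everything needed to
#     # rebuild the Quantity (class name, unit, ...) is stored under
#     # plain.meta['__serialized_columns__']['v']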
def _construct_mixin_from_obj_attrs_and_info(obj_attrs, info):
cls_full_name = obj_attrs.pop('__class__')
# If this is a supported class then import the class and run
# the _construct_from_col method. Prevent accidentally running
# untrusted code by only importing known astropy classes.
if cls_full_name not in __construct_mixin_classes:
raise ValueError('unsupported class for construct {}'.format(cls_full_name))
mod_name, cls_name = re.match(r'(.+)\.(\w+)', cls_full_name).groups()
module = import_module(mod_name)
cls = getattr(module, cls_name)
for attr, value in info.items():
if attr in cls.info.attrs_from_parent:
obj_attrs[attr] = value
mixin = cls.info._construct_from_dict(obj_attrs)
for attr, value in info.items():
if attr not in obj_attrs:
setattr(mixin.info, attr, value)
return mixin
class _TableLite(OrderedDict):
"""
Minimal table-like object for _construct_mixin_from_columns. This allows
manipulating the object like a Table but without the actual overhead
for a full Table.
More pressingly, there is an issue with constructing MaskedColumn, where the
encoded Column components (data, mask) are turned into a MaskedColumn.
When this happens in a real table then all other columns are immediately
masked and a warning is issued. This is not desirable.
"""
def add_column(self, col, index=0):
colnames = self.colnames
self[col.info.name] = col
for ii, name in enumerate(colnames):
if ii >= index:
self.move_to_end(name)
@property
def colnames(self):
return list(self.keys())
def itercols(self):
return self.values()
def _construct_mixin_from_columns(new_name, obj_attrs, out):
data_attrs_map = {}
for name, val in obj_attrs.items():
if isinstance(val, SerializedColumn):
if 'name' in val:
data_attrs_map[val['name']] = name
else:
_construct_mixin_from_columns(name, val, out)
data_attrs_map[name] = name
for name in data_attrs_map.values():
del obj_attrs[name]
# Get the index where to add new column
idx = min(out.colnames.index(name) for name in data_attrs_map)
# Name is the column name in the table (e.g. "coord.ra") and
# data_attr is the object attribute name (e.g. "ra"). A different
# example would be a formatted time object that would have (e.g.)
# "time_col" and "value", respectively.
for name, data_attr in data_attrs_map.items():
col = out[name]
obj_attrs[data_attr] = col
del out[name]
info = obj_attrs.pop('__info__', {})
if len(data_attrs_map) == 1:
# col is the first and only serialized column; in that case, use info
# stored on the column.
for attr, nontrivial in (('unit', lambda x: x not in (None, '')),
('format', lambda x: x is not None),
('description', lambda x: x is not None),
('meta', lambda x: x)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = col_attr
info['name'] = new_name
col = _construct_mixin_from_obj_attrs_and_info(obj_attrs, info)
out.add_column(col, index=idx)
def _construct_mixins_from_columns(tbl):
if '__serialized_columns__' not in tbl.meta:
return tbl
meta = tbl.meta.copy()
mixin_cols = meta.pop('__serialized_columns__')
out = _TableLite(tbl.columns)
for new_name, obj_attrs in mixin_cols.items():
_construct_mixin_from_columns(new_name, obj_attrs, out)
# If no quantity subclasses are in the output then output as Table.
# For instance ascii.read(file, format='ecsv') doesn't specify an
# output class and should return the minimal table class that
# represents the table file.
has_quantities = any(isinstance(col.info, QuantityInfo)
for col in out.itercols())
out_cls = QTable if has_quantities else Table
return out_cls(list(out.values()), names=out.colnames, copy=False, meta=meta)
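# Example (illustrative sketch, not executed here): round trip of a mixin
# column through the serialized representation.
#
#     from astropy.table import Table
#     from astropy.time import Time
#     t = Table({'t': Time(['2001-01-02', '2001-02-03'])})
#     plain = _represent_mixins_as_columns(t)
#     t2 = _construct_mixins_from_columns(plain)
#     # t2['t'] is a Time column again, rebuilt from the plain columns and
#     # the '__serialized_columns__' metadata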
|
3ad9a54db262dafef216c27306b5905b8e1b10526f7c535dfaf35821943d6dc4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Helper functions for table development, mostly creating useful
tables for testing.
"""
from itertools import cycle
import string
import numpy as np
from .table import Table, Column
from ..utils.data_info import ParentDtypeInfo
class TimingTables:
"""
Object which contains two tables and various other attributes that
are useful for timing and other API tests.
"""
def __init__(self, size=1000, masked=False):
self.masked = masked
# Initialize table
self.table = Table(masked=self.masked)
# Create column with mixed types
np.random.seed(12345)
self.table['i'] = np.arange(size)
self.table['a'] = np.random.random(size) # float
self.table['b'] = np.random.random(size) > 0.5 # bool
self.table['c'] = np.random.random((size, 10)) # 2d column
self.table['d'] = np.random.choice(np.array(list(string.ascii_letters)), size)
self.extra_row = {'a': 1.2, 'b': True, 'c': np.repeat(1, 10), 'd': 'Z'}
self.extra_column = np.random.randint(0, 100, size)
self.row_indices = np.where(self.table['a'] > 0.9)[0]
self.table_grouped = self.table.group_by('d')
# Another table for testing joining
self.other_table = Table(masked=self.masked)
self.other_table['i'] = np.arange(1, size, 3)
self.other_table['f'] = np.random.random()
self.other_table.sort('f')
# Another table for testing hstack
self.other_table_2 = Table(masked=self.masked)
self.other_table_2['g'] = np.random.random(size)
self.other_table_2['h'] = np.random.random((size, 10))
self.bool_mask = self.table['a'] > 0.6
def simple_table(size=3, cols=None, kinds='ifS', masked=False):
"""
Return a simple table for testing.
Example
--------
::
>>> from astropy.table.table_helpers import simple_table
>>> print(simple_table(3, 6, masked=True, kinds='ifOS'))
a b c d e f
--- --- -------- --- --- ---
-- 1.0 {'c': 2} -- 5 5.0
2 2.0 -- e 6 --
3 -- {'e': 4} f -- 7.0
Parameters
----------
size : int
Number of table rows
cols : int, optional
Number of table columns. Defaults to number of kinds.
kinds : str
String consisting of the column dtype.kinds. This string
will be cycled through to generate the column dtype.
The allowed values are 'i', 'f', 'S', 'O'.
Returns
-------
out : `Table`
New table with appropriate characteristics
"""
if cols is None:
cols = len(kinds)
if cols > 26:
raise ValueError("Max 26 columns in SimpleTable")
columns = []
names = [chr(ord('a') + ii) for ii in range(cols)]
letters = np.array([c for c in string.ascii_letters])
for jj, kind in zip(range(cols), cycle(kinds)):
if kind == 'i':
data = np.arange(1, size + 1, dtype=np.int64) + jj
elif kind == 'f':
data = np.arange(size, dtype=np.float64) + jj
elif kind == 'S':
indices = (np.arange(size) + jj) % len(letters)
data = letters[indices]
elif kind == 'O':
indices = (np.arange(size) + jj) % len(letters)
vals = letters[indices]
data = [{val: index} for val, index in zip(vals, indices)]
else:
raise ValueError('Unknown data kind')
columns.append(Column(data))
table = Table(columns, names=names, masked=masked)
if masked:
for ii, col in enumerate(table.columns.values()):
mask = np.array((np.arange(size) + ii) % 3, dtype=bool)
col.mask = ~mask
return table
def complex_table():
"""
Return a masked table from the io.votable test set that has a wide variety
of stressing types.
"""
from ..utils.data import get_pkg_data_filename
from ..io.votable.table import parse
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
votable = parse(get_pkg_data_filename('../io/votable/tests/data/regression.xml'),
pedantic=False)
first_table = votable.get_first_table()
table = first_table.to_table()
return table
class ArrayWrapper:
"""
Minimal mixin using a simple wrapper around a numpy array
"""
info = ParentDtypeInfo()
def __init__(self, data):
self.data = np.array(data)
if 'info' in getattr(data, '__dict__', ()):
self.info = data.info
def __getitem__(self, item):
if isinstance(item, (int, np.integer)):
out = self.data[item]
else:
out = self.__class__(self.data[item])
if 'info' in self.__dict__:
out.info = self.info
return out
def __setitem__(self, item, value):
self.data[item] = value
def __len__(self):
return len(self.data)
def __eq__(self, other):
"""Minimal equality testing, mostly for mixin unit tests"""
if isinstance(other, ArrayWrapper):
return self.data == other.data
else:
return self.data == other
@property
def dtype(self):
return self.data.dtype
@property
def shape(self):
return self.data.shape
def __repr__(self):
return ("<{0} name='{1}' data={2}>"
.format(self.__class__.__name__, self.info.name, self.data))
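# Example (illustrative, not executed here): because ArrayWrapper provides
# ``info``, ``dtype``, ``shape``, item access and ``__len__``, it can serve
# as a minimal mixin column.
#
#     from astropy.table import Table
#     t = Table()
#     t['a'] = ArrayWrapper([1, 2, 3])
#     # t['a'] is stored as-is (a mixin), not converted to a Column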
|
17879a2f6b3c1737d2e52e31e332ebc39b7daa4774f15656dc9b142bf454a57c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The SCEngine class uses the ``sortedcontainers`` package to implement an
Index engine for Tables.
"""
from collections import OrderedDict
from itertools import starmap
try:
from sortedcontainers import SortedList
HAS_SOCO = True
except ImportError:
HAS_SOCO = False
class Node(object):
__slots__ = ('key', 'value')
def __init__(self, key, value):
self.key = key
self.value = value
def __lt__(self, other):
if other.__class__ is Node:
return (self.key, self.value) < (other.key, other.value)
return self.key < other
def __le__(self, other):
if other.__class__ is Node:
return (self.key, self.value) <= (other.key, other.value)
return self.key <= other
def __eq__(self, other):
if other.__class__ is Node:
return (self.key, self.value) == (other.key, other.value)
return self.key == other
def __ne__(self, other):
if other.__class__ is Node:
return (self.key, self.value) != (other.key, other.value)
return self.key != other
def __gt__(self, other):
if other.__class__ is Node:
return (self.key, self.value) > (other.key, other.value)
return self.key > other
def __ge__(self, other):
if other.__class__ is Node:
return (self.key, self.value) >= (other.key, other.value)
return self.key >= other
__hash__ = None
def __repr__(self):
return 'Node({0!r}, {1!r})'.format(self.key, self.value)
class SCEngine:
'''
Fast tree-based implementation for indexing, using the
``sortedcontainers`` package.
Parameters
----------
data : Table
Sorted columns of the original table
row_index : Column object
Row numbers corresponding to data columns
unique : bool (defaults to False)
Whether the values of the index must be unique
'''
def __init__(self, data, row_index, unique=False):
node_keys = map(tuple, data)
self._nodes = SortedList(starmap(Node, zip(node_keys, row_index)))
self._unique = unique
def add(self, key, value):
'''
Add a key, value pair.
'''
if self._unique and (key in self._nodes):
message = 'duplicate {0!r} in unique index'.format(key)
raise ValueError(message)
self._nodes.add(Node(key, value))
def find(self, key):
'''
Find rows corresponding to the given key.
'''
return [node.value for node in self._nodes.irange(key, key)]
def remove(self, key, data=None):
'''
Remove data from the given key.
'''
if data is not None:
item = Node(key, data)
try:
self._nodes.remove(item)
except ValueError:
return False
return True
items = list(self._nodes.irange(key, key))
for item in items:
self._nodes.remove(item)
return bool(items)
def shift_left(self, row):
'''
Decrement rows larger than the given row.
'''
for node in self._nodes:
if node.value > row:
node.value -= 1
def shift_right(self, row):
'''
Increment rows greater than or equal to the given row.
'''
for node in self._nodes:
if node.value >= row:
node.value += 1
def items(self):
'''
Return a list of key, data tuples.
'''
result = OrderedDict()
for node in self._nodes:
if node.key in result:
result[node.key].append(node.value)
else:
result[node.key] = [node.value]
return result.items()
def sort(self):
'''
Make row order align with key order.
'''
for index, node in enumerate(self._nodes):
node.value = index
def sorted_data(self):
'''
Return a list of rows in order sorted by key.
'''
return [node.value for node in self._nodes]
def range(self, lower, upper, bounds=(True, True)):
'''
Return row values in the given range.
'''
iterator = self._nodes.irange(lower, upper, bounds)
return [node.value for node in iterator]
def replace_rows(self, row_map):
'''
Replace rows with the values in row_map.
'''
nodes = [node for node in self._nodes if node.value in row_map]
for node in nodes:
node.value = row_map[node.value]
self._nodes.clear()
self._nodes.update(nodes)
def __repr__(self):
return '{0!r}'.format(list(self._nodes))
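# Example (illustrative sketch, assuming sortedcontainers is installed):
# exercising SCEngine directly with plain key tuples in place of table
# columns.
#
#     keys = [(3,), (1,), (2,)]            # one key tuple per row
#     eng = SCEngine(keys, row_index=[0, 1, 2])
#     eng.find((2,))                       # -> [2]
#     eng.add((0,), 3)
#     eng.sorted_data()                    # rows ordered by key: [3, 1, 2, 0]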
|
84f05578d2796a66d4560d8e32cb3486a5c18a20532962882b8d070bd20ed2a1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from functools import partial
from .core import Kernel, Kernel1D, Kernel2D, MAX_NORMALIZATION
from ..utils.exceptions import AstropyUserWarning
from ..utils.console import human_file_size
from ..utils.decorators import deprecated_renamed_argument
from .. import units as u
from ..nddata import support_nddata
from ..modeling.core import _make_arithmetic_operator, BINARY_OPERATORS
from ..modeling.core import _CompoundModelMeta
# Disabling all doctests in this module until a better way of handling warnings
# in doctests can be determined
__doctest_skip__ = ['*']
BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']
@support_nddata(data='array')
def convolve(array, kernel, boundary='fill', fill_value=0.,
nan_treatment='interpolate', normalize_kernel=True, mask=None,
preserve_nan=False, normalization_zero_tol=1e-8):
'''
Convolve an array with a kernel.
This routine differs from `scipy.ndimage.convolve` because
it includes a special treatment for ``NaN`` values. Rather than
including ``NaN`` values in the array in the convolution calculation, which
causes large ``NaN`` holes in the convolved array, ``NaN`` values are
replaced with interpolated values using the kernel as an interpolation
function.
Parameters
----------
array : `~astropy.nddata.NDData` or `numpy.ndarray` or array-like
The array to convolve. This should be a 1, 2, or 3-dimensional array
or a list or a set of nested lists representing a 1, 2, or
3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of
the `~astropy.nddata.NDData` will be used as the ``mask`` argument.
kernel : `numpy.ndarray` or `~astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those for
the array, and the dimensions should be odd in all directions. If a
masked array, the masked values will be replaced by ``fill_value``.
boundary : str, optional
A flag indicating how to handle boundaries:
* `None`
Set the ``result`` values to zero where the kernel
extends beyond the edge of the array.
* 'fill'
Set values outside the array boundary to ``fill_value`` (default).
* 'wrap'
Periodic boundary that wraps to the other side of ``array``.
* 'extend'
Set values outside the array to the nearest ``array``
value.
fill_value : float, optional
The value to use outside the array when using ``boundary='fill'``
normalize_kernel : bool, optional
Whether to normalize the kernel to have a sum of one prior to
convolving
nan_treatment : 'interpolate', 'fill'
'interpolate' will result in renormalization of the kernel at each
position, with pixels that are NaN in the image ignored in both the
image and the kernel.
'fill' will replace the NaN pixels with a fixed numerical value (default
zero, see ``fill_value``) prior to convolution.
Note that if the kernel has a sum equal to zero, NaN interpolation
is not possible and will raise an exception
preserve_nan : bool
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : `None` or `numpy.ndarray`
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
masked if it is masked in either ``mask`` *or* ``array.mask``.
normalization_zero_tol : float, optional
The absolute tolerance on whether the kernel is different from zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is 1e-8.
Returns
-------
result : `numpy.ndarray`
An array with the same dimensions as the input array,
convolved with kernel. The data type depends on the input
array type. If array is a floating point type, then the
return array keeps the same data type, otherwise the type
is ``numpy.float``.
Notes
-----
For masked arrays, masked values are treated as NaNs. The convolution
is always done at ``numpy.float`` precision.
'''
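# Example (illustrative, not executed here): with the default
# nan_treatment='interpolate', NaN holes are filled from their
# kernel-weighted neighbors.
#
#     import numpy as np
#     from astropy.convolution import convolve, Gaussian1DKernel
#     arr = np.array([1., 2., np.nan, 4., 5., 6., 7., 8., 9., 10.])
#     out = convolve(arr, Gaussian1DKernel(stddev=1))
#     # out contains no NaN; the hole is interpolated from neighbors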
from .boundary_none import (convolve1d_boundary_none,
convolve2d_boundary_none,
convolve3d_boundary_none)
from .boundary_extend import (convolve1d_boundary_extend,
convolve2d_boundary_extend,
convolve3d_boundary_extend)
from .boundary_fill import (convolve1d_boundary_fill,
convolve2d_boundary_fill,
convolve3d_boundary_fill)
from .boundary_wrap import (convolve1d_boundary_wrap,
convolve2d_boundary_wrap,
convolve3d_boundary_wrap)
if boundary not in BOUNDARY_OPTIONS:
raise ValueError("Invalid boundary option: must be one of {0}"
.format(BOUNDARY_OPTIONS))
if nan_treatment not in ('interpolate', 'fill'):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# The cython routines all need float type inputs (so, a particular
# bit size, endianness, etc.). So we have to convert, which also
# has the effect of making copies so we don't modify the inputs.
# After this, the variables we work with will be array_internal, and
# kernel_internal. However -- we do want to keep track of what type
# the input array was so we can cast the result to that at the end
# if it's a floating point type. Don't bother with this for lists --
# just always push those as float.
# It is always necessary to make a copy of kernel (since it is modified),
# but, if we just so happen to be lucky enough to have the input array
# have exactly the desired type, we just alias to array_internal
# If array and kernel are both Kernel instances, convolve here and return.
if isinstance(kernel, Kernel) and isinstance(array, Kernel):
if isinstance(array, Kernel1D) and isinstance(kernel, Kernel1D):
new_array = convolve1d_boundary_fill(array.array, kernel.array,
0, True)
new_kernel = Kernel1D(array=new_array)
elif isinstance(array, Kernel2D) and isinstance(kernel, Kernel2D):
new_array = convolve2d_boundary_fill(array.array, kernel.array,
0, True)
new_kernel = Kernel2D(array=new_array)
else:
raise Exception("Can't convolve 1D and 2D kernel.")
new_kernel._separable = kernel._separable and array._separable
new_kernel._is_bool = False
return new_kernel
# Copy or alias array to array_internal
try:
# Anything that's masked must be turned into NaNs for the interpolation.
# This requires copying. A copy is also needed for nan_treatment == 'fill'
# A copy prevents possible function side-effects of the input array.
if nan_treatment == 'fill' or np.ma.is_masked(array) or mask is not None:
if np.ma.is_masked(array):
# ``np.ma.maskedarray.filled()`` returns a copy, however there is no way to specify the return type
# or order etc.
# In addition ``np.nan`` is a ``float`` and there is no conversion to an ``int`` type.
# Therefore, a pre-fill copy is needed for non ``float`` masked arrays.
# ``subok=True`` is needed to retain ``np.ma.maskedarray.filled()``.
# ``copy=False`` allows the fill to act as the copy if type and order are already correct.
array_internal = np.array(array, dtype=float, copy=False, order='C', subok=True)
array_internal = array_internal.filled(np.nan)
else:
# Since we're making a copy, we might as well use `subok=False` to save,
# what is probably, a negligible amount of memory.
array_internal = np.array(array, dtype=float, copy=True, order='C', subok=False)
if mask is not None:
# mask != 0 yields a bool mask for all ints/floats/bool
array_internal[mask != 0] = np.nan
else:
# The call below is synonymous with np.asanyarray(array, dtype=float, order='C')
# The advantage of `subok=True` is that it won't copy when array is an ndarray subclass. If it
# is and `subok=False` (default), then it will copy even if `copy=False`. This uses less memory
# when ndarray subclasses are passed in.
array_internal = np.array(array, dtype=float, copy=False, order='C', subok=True)
except (TypeError, ValueError) as e:
raise TypeError('array should be a Numpy array or something '
                        'convertible into a float array', e)
array_dtype = getattr(array, 'dtype', array_internal.dtype)
# Copy or alias kernel to kernel_internal
# Due to NaN interpolation and kernel normalization, a copy must always be made.
    # First alias, then copy depending on whether the kernel is masked (so as not to copy twice).
if isinstance(kernel, Kernel):
kernel_internal = kernel.array
else:
kernel_internal = kernel
if np.ma.is_masked(kernel_internal):
# *kernel* doesn't support NaN interpolation, so instead we just fill it.
# np.ma.maskedarray.filled() returns a copy.
kernel_internal = kernel_internal.filled(fill_value)
# MaskedArray.astype() has neither copy nor order params like ndarray.astype has.
        # np.ma.maskedarray.filled() returns an ndarray, not a maskedarray (implicit default subok=False).
# astype must be called after filling the masked data to avoid possible additional copying.
kernel_internal = kernel_internal.astype(float, copy=False, order='C', subok=True) # subok=True is redundant here but leave for future.
else:
try:
kernel_internal = np.array(kernel_internal, dtype=float, copy=True, order='C', subok=False)
except (TypeError, ValueError) as e:
raise TypeError('kernel should be a Numpy array or something '
                            'convertible into a float array', e)
# Check that the number of dimensions is compatible
if array_internal.ndim != kernel_internal.ndim:
raise Exception('array and kernel have differing number of '
'dimensions.')
# Mark the NaN values so we can replace them later if interpolate_nan is
# not set
if preserve_nan:
badvals = np.isnan(array_internal)
if nan_treatment == 'fill':
initially_nan = np.isnan(array_internal)
array_internal[initially_nan] = fill_value
# Because the Cython routines have to normalize the kernel on the fly, we
# explicitly normalize the kernel here, and then scale the image at the
# end if normalization was not requested.
kernel_sum = kernel_internal.sum()
kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol)
if (kernel_sum < 1. / MAX_NORMALIZATION or kernel_sums_to_zero) and normalize_kernel:
raise Exception("The kernel can't be normalized, because its sum is "
"close to zero. The sum of the given kernel is < {0}"
.format(1. / MAX_NORMALIZATION))
if not kernel_sums_to_zero:
kernel_internal /= kernel_sum
renormalize_by_kernel = not kernel_sums_to_zero
if array_internal.ndim == 0:
raise Exception("cannot convolve 0-dimensional arrays")
elif array_internal.ndim == 1:
if boundary == 'extend':
result = convolve1d_boundary_extend(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary == 'fill':
result = convolve1d_boundary_fill(array_internal,
kernel_internal,
float(fill_value),
renormalize_by_kernel)
elif boundary == 'wrap':
result = convolve1d_boundary_wrap(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary is None:
result = convolve1d_boundary_none(array_internal,
kernel_internal,
renormalize_by_kernel)
elif array_internal.ndim == 2:
if boundary == 'extend':
result = convolve2d_boundary_extend(array_internal,
kernel_internal,
renormalize_by_kernel,
)
elif boundary == 'fill':
result = convolve2d_boundary_fill(array_internal,
kernel_internal,
float(fill_value),
renormalize_by_kernel,
)
elif boundary == 'wrap':
result = convolve2d_boundary_wrap(array_internal,
kernel_internal,
renormalize_by_kernel,
)
elif boundary is None:
result = convolve2d_boundary_none(array_internal,
kernel_internal,
renormalize_by_kernel,
)
elif array_internal.ndim == 3:
if boundary == 'extend':
result = convolve3d_boundary_extend(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary == 'fill':
result = convolve3d_boundary_fill(array_internal,
kernel_internal,
float(fill_value),
renormalize_by_kernel)
elif boundary == 'wrap':
result = convolve3d_boundary_wrap(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary is None:
result = convolve3d_boundary_none(array_internal,
kernel_internal,
renormalize_by_kernel)
else:
raise NotImplementedError('convolve only supports 1, 2, and 3-dimensional '
'arrays at this time')
# If normalization was not requested, we need to scale the array (since
# the kernel is effectively normalized within the cython functions)
if not normalize_kernel and not kernel_sums_to_zero:
result *= kernel_sum
if preserve_nan:
result[badvals] = np.nan
if nan_treatment == 'fill':
array_internal[initially_nan] = np.nan
# Try to preserve the input type if it's a floating point type
if array_dtype.kind == 'f':
# Avoid making another copy if possible
try:
return result.astype(array_dtype, copy=False)
except TypeError:
return result.astype(array_dtype)
else:
return result
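# --- Illustrative usage sketch (hypothetical helper; not part of this module's
# API). A minimal demonstration of the ``boundary``/``nan_treatment`` options
# documented above; only run when called explicitly.
def _demo_convolve_options():
    data = np.array([1., np.nan, 3.])
    kern = np.array([1., 1., 1.])
    # 'fill' pads beyond the edges with fill_value (0 by default), while the
    # NaN pixel is interpolated from its neighbors using the kernel weights:
    interpolated = convolve(data, kern, boundary='fill',
                            nan_treatment='interpolate')
    # 'extend' repeats the edge values instead of padding with a constant:
    extended = convolve(data, kern, boundary='extend')
    # preserve_nan=True restores originally-NaN pixels after convolving:
    preserved = convolve(data, kern, boundary='fill', preserve_nan=True)
    return interpolated, extended, preserved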
@deprecated_renamed_argument('interpolate_nan', 'nan_treatment', 'v2.0.0')
@support_nddata(data='array')
def convolve_fft(array, kernel, boundary='fill', fill_value=0.,
nan_treatment='interpolate', normalize_kernel=True,
normalization_zero_tol=1e-8,
preserve_nan=False, mask=None, crop=True, return_fft=False,
fft_pad=None, psf_pad=None, quiet=False,
min_wt=0.0, allow_huge=False,
fftn=np.fft.fftn, ifftn=np.fft.ifftn,
complex_dtype=complex):
"""
Convolve an ndarray with an nd-kernel. Returns a convolved image with
``shape = array.shape``. Assumes kernel is centered.
`convolve_fft` is very similar to `convolve` in that it replaces ``NaN``
values in the original image with interpolated values using the kernel as
an interpolation function. However, it also includes many additional
options specific to the implementation.
`convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways:
* It can treat ``NaN`` values as zeros or interpolate over them.
* ``inf`` values are treated as ``NaN``
* (optionally) It pads to the nearest 2^n size to improve FFT speed.
* Its only valid ``mode`` is 'same' (i.e., the same shape array is returned)
* It lets you use your own fft, e.g.,
`pyFFTW <https://pypi.python.org/pypi/pyFFTW>`_ or
`pyFFTW3 <https://pypi.python.org/pypi/PyFFTW3/0.2.1>`_ , which can lead to
performance improvements, depending on your system configuration. pyFFTW3
is threaded, and therefore may yield significant performance benefits on
multi-core machines at the cost of greater memory requirements. Specify
    the ``fftn`` and ``ifftn`` keywords to override the defaults, which are
    `numpy.fft.fftn` and `numpy.fft.ifftn`.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric)
boundary : {'fill', 'wrap'}, optional
A flag indicating how to handle boundaries:
* 'fill': set values outside the array boundary to fill_value
(default)
* 'wrap': periodic boundary
The `None` and 'extend' parameters are not supported for FFT-based
convolution
fill_value : float, optional
The value to use outside the array when using boundary='fill'
nan_treatment : 'interpolate', 'fill'
        ``interpolate`` will result in renormalization of the kernel at each
        position, ignoring pixels that are NaN in the image, in both the
        image and the kernel. ``fill`` will replace the NaN pixels with a fixed
numerical value (default zero, see ``fill_value``) prior to
convolution. Note that if the kernel has a sum equal to zero, NaN
interpolation is not possible and will raise an exception.
normalize_kernel : function or boolean, optional
If specified, this is the function to divide kernel by to normalize it.
e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be:
``kernel = kernel / np.sum(kernel)``. If True, defaults to
``normalize_kernel = np.sum``.
    normalization_zero_tol : float, optional
        The absolute tolerance on whether the kernel is different from zero.
        If the kernel sums to zero to within this precision, it cannot be
        normalized. Default is 1e-8.
preserve_nan : bool
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : `None` or `numpy.ndarray`
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
        masked if it is masked in either ``mask`` *or* ``array.mask``.
Other Parameters
----------------
min_wt : float, optional
If ignoring ``NaN`` / zeros, force all grid points with a weight less than
this value to ``NaN`` (the weight of a grid point with *no* ignored
neighbors is 1.0).
If ``min_wt`` is zero, then all zero-weight points will be set to zero
        instead of ``NaN`` (which they would be otherwise, because 0/0 = nan).
See the examples below
fft_pad : bool, optional
Default on. Zero-pad image to the nearest 2^n. With
``boundary='wrap'``, this will be disabled.
psf_pad : bool, optional
Zero-pad image to be at least the sum of the image sizes to avoid
edge-wrapping when smoothing. This is enabled by default with
``boundary='fill'``, but it can be overridden with a boolean option.
``boundary='wrap'`` and ``psf_pad=True`` are not compatible.
crop : bool, optional
Default on. Return an image of the size of the larger of the input
image and the kernel.
If the image and kernel are asymmetric in opposite directions, will
return the largest image in both directions.
For example, if an input image has shape [100,3] but a kernel with shape
[6,6] is used, the output will be [100,6].
return_fft : bool, optional
Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is
``ifft(fft(image)*fft(kernel))``). Useful for making PSDs.
fftn, ifftn : functions, optional
The fft and inverse fft functions. Can be overridden to use your own
ffts, e.g. an fftw3 wrapper or scipy's fftn,
        ``fftn=scipy.fftpack.fftn``
    complex_dtype : complex type, optional
        Which complex dtype to use. `numpy` has a range of options, from
        ``numpy.complex64`` to ``numpy.complex256``.
quiet : bool, optional
Silence warning message about NaN interpolation
allow_huge : bool, optional
Allow huge arrays in the FFT? If False, will raise an exception if the
array or kernel size is >1 GB
Raises
------
ValueError:
If the array is bigger than 1 GB after padding, will raise this exception
unless ``allow_huge`` is True
See Also
--------
convolve:
Convolve is a non-fft version of this code. It is more memory
efficient and for small kernels can be faster.
Returns
-------
default : ndarray
``array`` convolved with ``kernel``. If ``return_fft`` is set, returns
``fft(array) * fft(kernel)``. If crop is not set, returns the
image, but with the fft-padded size instead of the input size
Notes
-----
With ``psf_pad=True`` and a large PSF, the resulting data can become
very large and consume a lot of memory. See Issue
https://github.com/astropy/astropy/pull/4366 for further detail.
Examples
--------
>>> convolve_fft([1, 0, 3], [1, 1, 1])
array([ 1., 4., 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1])
array([ 1., 4., 3.])
>>> convolve_fft([1, 0, 3], [0, 1, 0])
array([ 1., 0., 3.])
>>> convolve_fft([1, 2, 3], [1])
array([ 1., 2., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate')
array([ 1., 0., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate',
... min_wt=1e-8)
array([ 1., nan, 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate')
array([ 1., 4., 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True)
array([ 1., 2., 3.])
>>> import scipy.fftpack # optional - requires scipy
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True,
... fftn=scipy.fftpack.fft, ifftn=scipy.fftpack.ifft)
array([ 1., 2., 3.])
"""
# Checking copied from convolve.py - however, since FFTs have real &
# complex components, we change the types. Only the real part will be
# returned! Note that this always makes a copy.
# Check kernel is kernel instance
if isinstance(kernel, Kernel):
kernel = kernel.array
if isinstance(array, Kernel):
raise TypeError("Can't convolve two kernels with convolve_fft. "
"Use convolve instead.")
if nan_treatment not in ('interpolate', 'fill'):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# Convert array dtype to complex
# and ensure that list inputs become arrays
array = np.asarray(array, dtype=complex)
kernel = np.asarray(kernel, dtype=complex)
# Check that the number of dimensions is compatible
if array.ndim != kernel.ndim:
raise ValueError("Image and kernel must have same number of "
"dimensions")
arrayshape = array.shape
kernshape = kernel.shape
array_size_B = (np.product(arrayshape, dtype=np.int64) *
np.dtype(complex_dtype).itemsize)*u.byte
if array_size_B > 1*u.GB and not allow_huge:
raise ValueError("Size Error: Arrays will be {}. Use "
"allow_huge=True to override this exception."
.format(human_file_size(array_size_B.to_value(u.byte))))
# mask catching - masks must be turned into NaNs for use later in the image
if np.ma.is_masked(array):
mamask = array.mask
array = np.array(array)
array[mamask] = np.nan
elif mask is not None:
# copying here because we have to mask it below. But no need to copy
# if mask is None because we won't modify it.
array = np.array(array)
if mask is not None:
# mask != 0 yields a bool mask for all ints/floats/bool
array[mask != 0] = np.nan
# the *kernel* doesn't support NaN interpolation, so instead we just fill it
if np.ma.is_masked(kernel):
kernel = kernel.filled(0)
# NaN and inf catching
nanmaskarray = np.isnan(array) | np.isinf(array)
array[nanmaskarray] = 0
nanmaskkernel = np.isnan(kernel) | np.isinf(kernel)
kernel[nanmaskkernel] = 0
if normalize_kernel is True:
if kernel.sum() < 1. / MAX_NORMALIZATION:
raise Exception("The kernel can't be normalized, because its sum is "
"close to zero. The sum of the given kernel is < {0}"
.format(1. / MAX_NORMALIZATION))
kernel_scale = kernel.sum()
normalized_kernel = kernel / kernel_scale
kernel_scale = 1 # if we want to normalize it, leave it normed!
elif normalize_kernel:
# try this. If a function is not passed, the code will just crash... I
# think type checking would be better but PEPs say otherwise...
kernel_scale = normalize_kernel(kernel)
normalized_kernel = kernel / kernel_scale
else:
kernel_scale = kernel.sum()
if np.abs(kernel_scale) < normalization_zero_tol:
if nan_treatment == 'interpolate':
raise ValueError('Cannot interpolate NaNs with an unnormalizable kernel')
else:
# the kernel's sum is near-zero, so it can't be scaled
kernel_scale = 1
normalized_kernel = kernel
else:
# the kernel is normalizable; we'll temporarily normalize it
# now and undo the normalization later.
normalized_kernel = kernel / kernel_scale
if boundary is None:
warnings.warn("The convolve_fft version of boundary=None is "
"equivalent to the convolve boundary='fill'. There is "
"no FFT equivalent to convolve's "
"zero-if-kernel-leaves-boundary", AstropyUserWarning)
if psf_pad is None:
psf_pad = True
if fft_pad is None:
fft_pad = True
elif boundary == 'fill':
# create a boundary region at least as large as the kernel
if psf_pad is False:
warnings.warn("psf_pad was set to {0}, which overrides the "
"boundary='fill' setting.".format(psf_pad),
AstropyUserWarning)
else:
psf_pad = True
if fft_pad is None:
# default is 'True' according to the docstring
fft_pad = True
elif boundary == 'wrap':
if psf_pad:
raise ValueError("With boundary='wrap', psf_pad cannot be enabled.")
psf_pad = False
if fft_pad:
raise ValueError("With boundary='wrap', fft_pad cannot be enabled.")
fft_pad = False
fill_value = 0 # force zero; it should not be used
elif boundary == 'extend':
raise NotImplementedError("The 'extend' option is not implemented "
"for fft-based convolution")
# find ideal size (power of 2) for fft.
# Can add shapes because they are tuples
    if fft_pad:  # default=None, resolved to True above
        if psf_pad:  # default=None, resolved above based on ``boundary``
# add the dimensions and then take the max (bigger)
fsize = 2 ** np.ceil(np.log2(
np.max(np.array(arrayshape) + np.array(kernshape))))
else:
# add the shape lists (max of a list of length 4) (smaller)
# also makes the shapes square
fsize = 2 ** np.ceil(np.log2(np.max(arrayshape + kernshape)))
newshape = np.array([fsize for ii in range(array.ndim)], dtype=int)
else:
if psf_pad:
# just add the biggest dimensions
newshape = np.array(arrayshape) + np.array(kernshape)
else:
newshape = np.array([np.max([imsh, kernsh])
for imsh, kernsh in zip(arrayshape, kernshape)])
# perform a second check after padding
array_size_C = (np.product(newshape, dtype=np.int64) *
np.dtype(complex_dtype).itemsize)*u.byte
if array_size_C > 1*u.GB and not allow_huge:
raise ValueError("Size Error: Arrays will be {}. Use "
"allow_huge=True to override this exception."
.format(human_file_size(array_size_C)))
# For future reference, this can be used to predict "almost exactly"
# how much *additional* memory will be used.
# size * (array + kernel + kernelfft + arrayfft +
# (kernel*array)fft +
# optional(weight image + weight_fft + weight_ifft) +
# optional(returned_fft))
# total_memory_used_GB = (np.product(newshape)*np.dtype(complex_dtype).itemsize
# * (5 + 3*((interpolate_nan or ) and kernel_is_normalized))
# + (1 + (not return_fft)) *
# np.product(arrayshape)*np.dtype(complex_dtype).itemsize
# + np.product(arrayshape)*np.dtype(bool).itemsize
# + np.product(kernshape)*np.dtype(bool).itemsize)
# ) / 1024.**3
# separate each dimension by the padding size... this is to determine the
# appropriate slice size to get back to the input dimensions
arrayslices = []
kernslices = []
    for newdimsize, arraydimsize, kerndimsize in zip(newshape, arrayshape, kernshape):
center = newdimsize - (newdimsize + 1) // 2
arrayslices += [slice(center - arraydimsize // 2,
center + (arraydimsize + 1) // 2)]
kernslices += [slice(center - kerndimsize // 2,
center + (kerndimsize + 1) // 2)]
if not np.all(newshape == arrayshape):
if np.isfinite(fill_value):
bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value
else:
bigarray = np.zeros(newshape, dtype=complex_dtype)
bigarray[arrayslices] = array
else:
bigarray = array
if not np.all(newshape == kernshape):
bigkernel = np.zeros(newshape, dtype=complex_dtype)
bigkernel[kernslices] = normalized_kernel
else:
bigkernel = normalized_kernel
arrayfft = fftn(bigarray)
# need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity
kernfft = fftn(np.fft.ifftshift(bigkernel))
fftmult = arrayfft * kernfft
interpolate_nan = (nan_treatment == 'interpolate')
if interpolate_nan:
if not np.isfinite(fill_value):
bigimwt = np.zeros(newshape, dtype=complex_dtype)
else:
bigimwt = np.ones(newshape, dtype=complex_dtype)
bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan
wtfft = fftn(bigimwt)
# You can only get to this point if kernel_is_normalized
wtfftmult = wtfft * kernfft
wtsm = ifftn(wtfftmult)
# need to re-zero weights outside of the image (if it is padded, we
# still don't weight those regions)
bigimwt[arrayslices] = wtsm.real[arrayslices]
else:
bigimwt = 1
if np.isnan(fftmult).any():
# this check should be unnecessary; call it an insanity check
raise ValueError("Encountered NaNs in convolve. This is disallowed.")
# restore NaNs in original image (they were modified inplace earlier)
# We don't have to worry about masked arrays - if input was masked, it was
# copied
array[nanmaskarray] = np.nan
kernel[nanmaskkernel] = np.nan
fftmult *= kernel_scale
if return_fft:
return fftmult
if interpolate_nan:
rifft = (ifftn(fftmult)) / bigimwt
if not np.isscalar(bigimwt):
if min_wt > 0.:
rifft[bigimwt < min_wt] = np.nan
else:
# Set anything with no weight to zero (taking into account
# slight offsets due to floating-point errors).
rifft[bigimwt < 10 * np.finfo(bigimwt.dtype).eps] = 0.0
else:
rifft = ifftn(fftmult)
if preserve_nan:
rifft[arrayslices][nanmaskarray] = np.nan
if crop:
result = rifft[arrayslices].real
return result
else:
return rifft.real
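# --- Illustrative usage sketch (hypothetical helper; not part of this module's
# API). Shows the padding options resolved in the ``boundary`` handling above
# and the pluggable FFT backend.
def _demo_convolve_fft_padding():
    data = np.ones((32, 32))
    kern = np.ones((3, 3)) / 9.
    # With boundary='fill', psf_pad defaults to True (pad by the kernel size
    # to avoid edge wrap-around) and fft_pad defaults to True (round the
    # padded size up to a power of two for FFT speed):
    result = convolve_fft(data, kern, boundary='fill')
    # Any fftn/ifftn pair with the numpy.fft call signature can be swapped in:
    result_custom = convolve_fft(data, kern,
                                 fftn=np.fft.fftn, ifftn=np.fft.ifftn)
    return result, result_custom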
def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs):
"""
Given a data set containing NaNs, replace the NaNs by interpolating from
neighboring data points with a given kernel.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric). The kernel
*must be normalizable* (i.e., its sum cannot be zero).
convolve : `convolve` or `convolve_fft`
One of the two convolution functions defined in this package.
Returns
-------
newarray : `numpy.ndarray`
A copy of the original array with NaN pixels replaced with their
interpolated counterparts
"""
if not np.any(np.isnan(array)):
return array.copy()
newarray = array.copy()
convolved = convolve(array, kernel, nan_treatment='interpolate',
normalize_kernel=True, **kwargs)
isnan = np.isnan(array)
newarray[isnan] = convolved[isnan]
return newarray
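# --- Illustrative usage sketch (hypothetical helper; not part of this module's
# API).
def _demo_interpolate_replace_nans():
    data = np.array([1., np.nan, 3.])
    kern = np.array([1., 1., 1.])
    # Only the NaN pixel is replaced; finite pixels are returned unchanged.
    return interpolate_replace_nans(data, kern)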
def convolve_models(model, kernel, mode='convolve_fft', **kwargs):
"""
Convolve two models using `~astropy.convolution.convolve_fft`.
Parameters
----------
model : `~astropy.modeling.core.Model`
Functional model
kernel : `~astropy.modeling.core.Model`
Convolution kernel
mode : str
Keyword representing which function to use for convolution.
* 'convolve_fft' : use `~astropy.convolution.convolve_fft` function.
* 'convolve' : use `~astropy.convolution.convolve`.
kwargs : dict
        Keyword arguments to be passed either to `~astropy.convolution.convolve`
or `~astropy.convolution.convolve_fft` depending on ``mode``.
Returns
-------
default : CompoundModel
Convolved model
"""
if mode == 'convolve_fft':
BINARY_OPERATORS['convolve_fft'] = _make_arithmetic_operator(partial(convolve_fft, **kwargs))
elif mode == 'convolve':
BINARY_OPERATORS['convolve'] = _make_arithmetic_operator(partial(convolve, **kwargs))
else:
raise ValueError('Mode {} is not supported.'.format(mode))
return _CompoundModelMeta._from_operator(mode, model, kernel)
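# --- Illustrative usage sketch (hypothetical helper; not part of this module's
# API). Assumes `astropy.modeling.models.Gaussian1D` is available; the
# resulting compound model evaluates the convolution of its two components.
def _demo_convolve_models():
    from astropy.modeling.models import Gaussian1D
    g1 = Gaussian1D(amplitude=1., mean=0., stddev=1.)
    g2 = Gaussian1D(amplitude=1., mean=0., stddev=2.)
    conv = convolve_models(g1, g2, mode='convolve_fft')
    return conv(np.linspace(-5., 5., 101))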
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers" are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy to extend
framework and allow users to easily create new fitters by combining statistics
with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
"""
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
import numpy as np
from .utils import poly_map_domain, _combine_equivalency_dict
from ..units import Quantity
from ..utils.exceptions import AstropyUserWarning
from .optimizers import (SLSQP, Simplex)
from .statistic import (leastsquare)
# Check pkg_resources exists
try:
from pkg_resources import iter_entry_points
HAS_PKG = True
except ImportError:
HAS_PKG = False
__all__ = ['LinearLSQFitter', 'LevMarLSQFitter', 'FittingWithOutlierRemoval',
'SLSQPLSQFitter', 'SimplexLSQFitter', 'JointFitter', 'Fitter']
# Statistic functions implemented in `astropy.modeling.statistic`
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers`
OPTIMIZERS = [Simplex, SLSQP]
from .optimizers import (DEFAULT_MAXITER, DEFAULT_EPS, DEFAULT_ACC)
class ModelsError(Exception):
"""Base class for model exceptions"""
class ModelLinearityError(ModelsError):
""" Raised when a non-linear model is passed to a linear fitter."""
class UnsupportedConstraintError(ModelsError, ValueError):
"""
Raised when a fitter does not support a type of constraint.
"""
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith('_'):
mcls.registry.add(cls)
return cls
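# --- Illustrative sketch (hypothetical helper; not part of this module's API).
# Any concrete class created with ``_FitterMeta`` whose name does not start
# with an underscore is added to the shared registry automatically.
def _demo_fitter_registry():
    class ToyFitter(metaclass=_FitterMeta):
        def __call__(self, model, x, y):
            return model
    return ToyFitter in _FitterMeta.registry  # True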
def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop('equivalencies', None)
data_has_units = (isinstance(x, Quantity) or
isinstance(y, Quantity) or
isinstance(z, Quantity))
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies)
# If input_units is defined, we transform the input data into those
# expected by the model. We hard-code the input names 'x', and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(model.input_units['x'], equivalencies=input_units_equivalencies['x'])
if isinstance(y, Quantity) and z is not None:
y = y.to(model.input_units['y'], equivalencies=input_units_equivalencies['y'])
# We now strip away the units from the parameters, taking care to
# first convert any parameters to the units that correspond to the
# input units (to make sure that initial guesses on the parameters)
# are in the right unit system
model = model.without_units_for_data(x=x, y=y, z=z)
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(y, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(x=x, y=y, z=z)
return model_new
else:
raise NotImplementedError("This model does not support being fit to data with units")
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper
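# --- Illustrative sketch (hypothetical helper; not part of this module's API).
# What the decorator above enables: fitting Quantity data with a model that
# carries units, assuming the model supports unit fitting.
def _demo_unit_fitting():
    import astropy.units as u
    from astropy.modeling.models import Gaussian1D
    g_init = Gaussian1D(amplitude=1. * u.Jy, mean=3. * u.um, stddev=1. * u.um)
    x = np.linspace(0., 6., 50) * u.um
    y = g_init(x)
    # Units are stripped before the underlying fit and re-attached after:
    return LevMarLSQFitter()(g_init, x, y)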
class Fitter(metaclass=_FitterMeta):
"""
Base class for all fitters.
Parameters
----------
optimizer : callable
A callable implementing an optimization algorithm
statistic : callable
Statistic function
"""
def __init__(self, optimizer, statistic):
if optimizer is None:
raise ValueError("Expected an optimizer.")
if statistic is None:
raise ValueError("Expected a statistic function.")
if inspect.isclass(optimizer):
# a callable class
self._opt_method = optimizer()
elif inspect.isfunction(optimizer):
self._opt_method = optimizer
else:
raise ValueError("Expected optimizer to be a callable class or a function.")
if inspect.isclass(statistic):
self._stat_method = statistic()
else:
self._stat_method = statistic
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [other_args], [input coordinates]]
other_args may include weights or any other quantities specific for
a statistic
Notes
-----
The list of arguments (args) is set in the `__call__` method.
Fitters may overwrite this method, e.g. when statistic functions
require other arguments.
"""
model = args[0]
meas = args[-1]
_fitter_to_model_params(model, fps)
res = self._stat_method(meas, model, *args[1:-1])
return res
@abc.abstractmethod
def __call__(self):
"""
This method performs the actual fitting and modifies the parameter list
of a model.
Fitter subclasses should implement this method.
"""
raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
class LinearLSQFitter(metaclass=_FitterMeta):
"""
A class performing a linear least square fitting.
Uses `numpy.linalg.lstsq` to do the fitting.
Given a model and data, fits the model to the data and changes the
model's parameters. Keeps a dictionary of auxiliary fitting information.
Notes
-----
Note that currently LinearLSQFitter does not support compound models.
"""
supported_constraints = ['fixed']
supports_masked_input = True
def __init__(self):
self.fit_info = {'residuals': None,
'rank': None,
'singular_values': None,
'params': None
}
@staticmethod
def _deriv_with_constraints(model, param_indices, x=None, y=None):
if y is None:
d = np.array(model.fit_deriv(x, *model.parameters))
else:
d = np.array(model.fit_deriv(x, y, *model.parameters))
if model.col_fit_deriv:
return d[param_indices]
else:
return d[..., param_indices]
def _map_domain_window(self, model, x, y=None):
"""
Maps domain into window for a polynomial model which has these
attributes.
"""
if y is None:
if hasattr(model, 'domain') and model.domain is None:
model.domain = [x.min(), x.max()]
if hasattr(model, 'window') and model.window is None:
model.window = [-1, 1]
return poly_map_domain(x, model.domain, model.window)
else:
if hasattr(model, 'x_domain') and model.x_domain is None:
model.x_domain = [x.min(), x.max()]
if hasattr(model, 'y_domain') and model.y_domain is None:
model.y_domain = [y.min(), y.max()]
if hasattr(model, 'x_window') and model.x_window is None:
model.x_window = [-1., 1.]
if hasattr(model, 'y_window') and model.y_window is None:
model.y_window = [-1., 1.]
xnew = poly_map_domain(x, model.x_domain, model.x_window)
ynew = poly_map_domain(y, model.y_domain, model.y_window)
return xnew, ynew
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, rcond=None):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
Input coordinates
y : array-like
Input coordinates
z : array-like (optional)
Input coordinates.
If the dependent (``y`` or ``z``) co-ordinate values are provided
as a `numpy.ma.MaskedArray`, any masked points are ignored when
fitting. Note that model set fitting is significantly slower when
there are masked points (not just an empty mask), as the matrix
equation has to be solved for each model separately when their
co-ordinate grids differ.
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
rcond : float, optional
Cut-off ratio for small singular values of ``a``.
Singular values are set to zero if they are smaller than ``rcond``
times the largest singular value of ``a``.
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
if not model.fittable:
raise ValueError("Model must be a subclass of FittableModel")
if not model.linear:
raise ModelLinearityError('Model is not linear in parameters, '
'linear fit methods should not be used.')
if hasattr(model, "submodel_names"):
raise ValueError("Model must be simple, not compound")
_validate_constraints(self.supported_constraints, model)
model_copy = model.copy()
_, fitparam_indices = _model_to_fit_params(model_copy)
if model_copy.n_inputs == 2 and z is None:
raise ValueError("Expected x, y and z for a 2 dimensional model.")
farg = _convert_input(x, y, z, n_models=len(model_copy),
model_set_axis=model_copy.model_set_axis)
has_fixed = any(model_copy.fixed.values())
if has_fixed:
# The list of fixed params is the complement of those being fitted:
fixparam_indices = [idx for idx in
range(len(model_copy.param_names))
if idx not in fitparam_indices]
# Construct matrix of user-fixed parameters that can be dotted with
# the corresponding fit_deriv() terms, to evaluate corrections to
# the dependent variable in order to fit only the remaining terms:
fixparams = np.asarray([getattr(model_copy,
model_copy.param_names[idx]).value
for idx in fixparam_indices])
if len(farg) == 2:
x, y = farg
# map domain into window
if hasattr(model_copy, 'domain'):
x = self._map_domain_window(model_copy, x)
if has_fixed:
lhs = self._deriv_with_constraints(model_copy,
fitparam_indices,
x=x)
fixderivs = self._deriv_with_constraints(model_copy,
fixparam_indices,
x=x)
else:
lhs = model_copy.fit_deriv(x, *model_copy.parameters)
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
rhs = y
else:
x, y, z = farg
# map domain into window
if hasattr(model_copy, 'x_domain'):
x, y = self._map_domain_window(model_copy, x, y)
if has_fixed:
lhs = self._deriv_with_constraints(model_copy,
fitparam_indices, x=x, y=y)
fixderivs = self._deriv_with_constraints(model_copy,
fixparam_indices, x=x, y=y)
else:
lhs = model_copy.fit_deriv(x, y, *model_copy.parameters)
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)
if len(model_copy) > 1:
# Just to be explicit (rather than baking in False == 0):
model_axis = model_copy.model_set_axis or 0
if z.ndim > 2:
# For higher-dimensional z, flatten all the axes except the
# dimension along which models are stacked and transpose so
# the model axis is *last* (I think this resolves Erik's
# pending generalization from 80a6f25a):
rhs = np.rollaxis(z, model_axis, z.ndim)
rhs = rhs.reshape(-1, rhs.shape[-1])
else:
# This "else" seems to handle the corner case where the
# user has already flattened x/y before attempting a 2D fit
# but z has a second axis for the model set. NB. This is
# ~5-10x faster than using rollaxis.
rhs = z.T if model_axis == 0 else z
else:
rhs = z.flatten()
# If the derivative is defined along rows (as with non-linear models)
if model_copy.col_fit_deriv:
lhs = np.asarray(lhs).T
# Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs
# when constructing their Vandermonde matrix, which can lead to obscure
# failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices,
# so just raise a slightly more informative error when this happens:
if lhs.ndim > 2:
raise ValueError('{0} gives unsupported >2D derivative matrix for '
'this x/y'.format(type(model_copy).__name__))
# Subtract any terms fixed by the user from (a copy of) the RHS, in
# order to fit the remaining terms correctly:
if has_fixed:
if model_copy.col_fit_deriv:
fixderivs = np.asarray(fixderivs).T # as for lhs above
rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms
# Subtract any terms implicit in the model from the RHS, which, like
# user-fixed terms, affect the dependent variable but are not fitted:
if sum_of_implicit_terms is not None:
# If we have a model set, the extra axis must be added to
# sum_of_implicit_terms as its innermost dimension, to match the
# dimensionality of rhs after _convert_input "rolls" it as needed
# by np.linalg.lstsq. The vector then gets broadcast to the right
# number of sets (columns). This assumes all the models share the
# same input co-ordinates, as is currently the case.
if len(model_copy) > 1:
sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
rhs = rhs - sum_of_implicit_terms
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(x) != len(weights):
raise ValueError("x and weights should have the same length")
if rhs.ndim == 2:
lhs *= weights[:, np.newaxis]
# Don't modify in-place in case rhs was the original dependent
# variable array
rhs = rhs * weights[:, np.newaxis]
else:
lhs *= weights[:, np.newaxis]
rhs = rhs * weights
if rcond is None:
rcond = len(x) * np.finfo(x.dtype).eps
scl = (lhs * lhs).sum(0)
lhs /= scl
masked = np.any(np.ma.getmask(rhs))
if len(model_copy) == 1 or not masked:
# If we're fitting one or more models over a common set of points,
# we only have to solve a single matrix equation, which is an order
# of magnitude faster than calling lstsq() once per model below:
good = ~rhs.mask if masked else slice(None) # latter is a no-op
# Solve for one or more models:
lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good],
rhs[good], rcond)
else:
# Where fitting multiple models with masked pixels, initialize an
# empty array of coefficients and populate it one model at a time.
# The shape matches the number of coefficients from the Vandermonde
# matrix and the number of models from the RHS:
lacoef = np.zeros(lhs.shape[-1:] + rhs.shape[-1:], dtype=rhs.dtype)
# Loop over the models and solve for each one. By this point, the
# model set axis is the second of two. Transpose rather than using,
# say, np.moveaxis(array, -1, 0), since it's slightly faster and
# lstsq can't handle >2D arrays anyway. This could perhaps be
# optimized by collecting together models with identical masks
# (eg. those with no rejected points) into one operation, though it
# will still be relatively slow when calling lstsq repeatedly.
for model_rhs, model_lacoef in zip(rhs.T, lacoef.T):
# Cull masked points on both sides of the matrix equation:
good = ~model_rhs.mask
model_lhs = lhs[good]
model_rhs = model_rhs[good][..., np.newaxis]
# Solve for this model:
t_coef, resids, rank, sval = np.linalg.lstsq(model_lhs,
model_rhs, rcond)
model_lacoef[:] = t_coef.T
self.fit_info['residuals'] = resids
self.fit_info['rank'] = rank
self.fit_info['singular_values'] = sval
lacoef = (lacoef.T / scl).T
self.fit_info['params'] = lacoef
# TODO: Only Polynomial models currently have an _order attribute;
# maybe change this to read isinstance(model, PolynomialBase)
if hasattr(model_copy, '_order') and rank != model_copy._order:
warnings.warn("The fit may be poorly conditioned\n",
AstropyUserWarning)
_fitter_to_model_params(model_copy, lacoef.flatten())
return model_copy
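# --- Illustrative usage sketch (hypothetical helper; not part of this module's
# API).
def _demo_linear_lsq():
    from astropy.modeling import models
    x = np.linspace(-1., 1., 50)
    y = 1. + 2. * x + 0.5 * x ** 2
    fit = LinearLSQFitter()
    # A single lstsq solve fits all coefficients of a linear-in-parameters
    # model at once:
    p = fit(models.Polynomial1D(degree=2), x, y)
    return p(x) - y  # residuals should be ~0 for this noiseless data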
class FittingWithOutlierRemoval:
"""
This class combines an outlier removal technique with a fitting procedure.
Basically, given a number of iterations ``niter``, outliers are removed
and fitting is performed for each iteration.
Parameters
----------
fitter : An Astropy fitter
An instance of any Astropy fitter, i.e., LinearLSQFitter,
LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For
model set fitting, this must understand masked input data (as
indicated by the fitter class attribute ``supports_masked_input``).
outlier_func : function
A function for outlier removal.
If this accepts an ``axis`` parameter like the `numpy` functions, the
appropriate value will be supplied automatically when fitting model
sets (unless overridden in ``outlier_kwargs``), to find outliers for
each model separately; otherwise, the same filtering must be performed
in a loop over models, which is almost an order of magnitude slower.
niter : int (optional)
Number of iterations.
outlier_kwargs : dict (optional)
Keyword arguments for outlier_func.
"""
def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
self.fitter = fitter
self.outlier_func = outlier_func
self.niter = niter
self.outlier_kwargs = outlier_kwargs
def __str__(self):
return ("Fitter: {0}\nOutlier function: {1}\nNum. of iterations: {2}" +
("\nOutlier func. args.: {3}"))\
                .format(self.fitter.__class__.__name__,
self.outlier_func.__name__, self.niter,
self.outlier_kwargs)
def __repr__(self):
return ("{0}(fitter: {1}, outlier_func: {2}," +
" niter: {3}, outlier_kwargs: {4})")\
.format(self.__class__.__name__,
self.fitter.__class__.__name__,
self.outlier_func.__name__, self.niter,
self.outlier_kwargs)
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Parameters
----------
model : `~astropy.modeling.FittableModel`
An analytic model which will be fit to the provided data.
This also contains the initial guess for an optimization
algorithm.
x : array-like
Input coordinates.
y : array-like
Data measurements (1D case) or input coordinates (2D case).
z : array-like (optional)
Data measurements (2D case).
weights : array-like (optional)
Weights to be passed to the fitter.
kwargs : dict (optional)
Keyword arguments to be passed to the fitter.
Returns
-------
fitted_model : `~astropy.modeling.FittableModel`
Fitted model after outlier removal.
mask : `numpy.ndarray`
Boolean mask array, identifying which points were used in the final
fitting iteration (False) and which were found to be outliers or
were masked in the input (True).
"""
# For single models, the data get filtered here at each iteration and
# then passed to the fitter, which is the historical behaviour and
# works even for fitters that don't understand masked arrays. For model
# sets, the fitter must be able to filter masked data internally,
# because fitters require a single set of x/y co-ordinates whereas the
# eliminated points can vary between models. To avoid this limitation,
# we could fall back to looping over individual model fits, but it
# would likely be fiddly and involve even more overhead (and the
# non-linear fitters don't work with model sets anyway, as of writing).
if len(model) == 1:
model_set_axis = None
else:
if not hasattr(self.fitter, 'supports_masked_input') or \
self.fitter.supports_masked_input is not True:
raise ValueError("{0} cannot fit model sets with masked "
"values".format(type(self.fitter).__name__))
# Fitters use their input model's model_set_axis to determine how
# their input data are stacked:
model_set_axis = model.model_set_axis
# Construct input co-ordinate tuples for fitters & models that are
# appropriate for the dimensionality being fitted:
if z is None:
coords = x,
data = y
else:
coords = x, y
data = z
# For model sets, construct a numpy-standard "axis" tuple for the
# outlier function, to treat each model separately (if supported):
if model_set_axis is not None:
if model_set_axis < 0:
model_set_axis += data.ndim
if 'axis' not in self.outlier_kwargs: # allow user override
# This also works for False (like model instantiation):
self.outlier_kwargs['axis'] = tuple(
n for n in range(data.ndim) if n != model_set_axis
)
loop = False
# Starting fit, prior to any iteration and masking:
fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
filtered_data = np.ma.masked_array(data)
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
filtered_weights = weights
# Perform the iterative fitting:
# TO DO: add a stopping criterion when results aren't changing?
for n in range(self.niter):
# (Re-)evaluate the last model:
model_vals = fitted_model(*coords, model_set_axis=False)
# Determine the outliers:
if not loop:
# Pass axis parameter if outlier_func accepts it, otherwise
# prepare for looping over models:
try:
filtered_data = self.outlier_func(
filtered_data - model_vals, **self.outlier_kwargs
)
# If this happens to catch an error with a parameter other
# than axis, the next attempt will fail accordingly:
except TypeError:
if model_set_axis is None:
raise
else:
self.outlier_kwargs.pop('axis', None)
loop = True
# Construct MaskedArray to hold filtered values:
filtered_data = np.ma.masked_array(
filtered_data,
dtype=np.result_type(filtered_data, model_vals),
copy=True
)
# Make sure the mask is an array, not just nomask:
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
# Get views transposed appropriately for iteration
# over the set (handling data & mask separately due to
# NumPy issue #8506):
data_T = np.rollaxis(filtered_data, model_set_axis, 0)
mask_T = np.rollaxis(filtered_data.mask,
model_set_axis, 0)
if loop:
model_vals_T = np.rollaxis(model_vals, model_set_axis, 0)
for row_data, row_mask, row_mod_vals in zip(data_T, mask_T,
model_vals_T):
masked_residuals = self.outlier_func(
row_data - row_mod_vals, **self.outlier_kwargs
)
row_data.data[:] = masked_residuals.data
row_mask[:] = masked_residuals.mask
# Issue speed warning after the fact, so it only shows up when
# the TypeError is genuinely due to the axis argument.
warnings.warn('outlier_func did not accept axis argument; '
'reverted to slow loop over models.',
AstropyUserWarning)
# Recombine newly-masked residuals with model to get masked values:
filtered_data += model_vals
# Re-fit the data after filtering, passing masked/unmasked values
# for single models / sets, respectively:
if model_set_axis is None:
good = ~filtered_data.mask
if weights is not None:
filtered_weights = weights[good]
fitted_model = self.fitter(fitted_model,
*(c[good] for c in coords),
filtered_data.data[good],
weights=filtered_weights, **kwargs)
else:
fitted_model = self.fitter(fitted_model, *coords,
filtered_data,
weights=filtered_weights, **kwargs)
return fitted_model, filtered_data.mask
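# --- Illustrative usage sketch (hypothetical helper; not part of this module's
# API). Combines the wrapper above with `astropy.stats.sigma_clip` as the
# outlier function.
def _demo_outlier_removal():
    from astropy.stats import sigma_clip
    from astropy.modeling import models
    x = np.linspace(0., 10., 50)
    y = 3. * x + 1.
    y[10] += 100.  # inject a single gross outlier
    fit = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
                                    niter=3, sigma=3.0)
    fitted_model, mask = fit(models.Polynomial1D(degree=1), x, y)
    return fitted_model.parameters, mask[10]  # the outlier ends up masked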
class LevMarLSQFitter(metaclass=_FitterMeta):
"""
Levenberg-Marquardt algorithm and least squares statistic.
Attributes
----------
fit_info : dict
The `scipy.optimize.leastsq` result for the most recent fit (see
notes).
Notes
-----
The ``fit_info`` dictionary contains the values returned by
`scipy.optimize.leastsq` for the most recent fit, including the values from
the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
documentation for details on the meaning of these values. Note that the
``x`` return value is *not* included (as it is instead the parameter values
of the returned model).
Additionally, one additional element of ``fit_info`` is computed whenever a
model is fit, with the key 'param_cov'. The corresponding value is the
covariance matrix of the parameters as a 2D numpy array. The order of the
matrix elements matches the order of the parameters in the fitted model
(i.e., the same order as ``model.param_names``).
"""
supported_constraints = ['fixed', 'tied', 'bounds']
"""
The constraint types supported by this fitter type.
"""
def __init__(self):
self.fit_info = {'nfev': None,
'fvec': None,
'fjac': None,
'ipvt': None,
'qtf': None,
'message': None,
'ierr': None,
'param_jac': None,
'param_cov': None}
super().__init__()
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [weights], [input coordinates]]
"""
model = args[0]
weights = args[1]
_fitter_to_model_params(model, fps)
meas = args[-1]
if weights is None:
return np.ravel(model(*args[2: -1]) - meas)
else:
return np.ravel(weights * (model(*args[2: -1]) - meas))
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None,
maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC,
epsilon=DEFAULT_EPS, estimate_jacobian=False):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
maxiter : int
maximum number of iterations
acc : float
Relative error desired in the approximate solution
epsilon : float
A suitable step length for the forward-difference
approximation of the Jacobian (if model.fjac=None). If
epsfcn is less than the machine precision, it is
assumed that the relative errors in the functions are
of the order of the machine precision.
estimate_jacobian : bool
If False (default) and if the model has a fit_deriv method,
it will be used. Otherwise the Jacobian will be estimated.
If True, the Jacobian will be estimated in any case.
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
from scipy import optimize
model_copy = _validate_model(model, self.supported_constraints)
farg = (model_copy, weights, ) + _convert_input(x, y, z)
if model_copy.fit_deriv is None or estimate_jacobian:
dfunc = None
else:
dfunc = self._wrap_deriv
init_values, _ = _model_to_fit_params(model_copy)
fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
self.objective_function, init_values, args=farg, Dfun=dfunc,
col_deriv=model_copy.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon,
xtol=acc, full_output=True)
_fitter_to_model_params(model_copy, fitparams)
self.fit_info.update(dinfo)
self.fit_info['cov_x'] = cov_x
self.fit_info['message'] = mess
self.fit_info['ierr'] = ierr
if ierr not in [1, 2, 3, 4]:
warnings.warn("The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning)
# now try to compute the true covariance matrix
if (len(y) > len(init_values)) and cov_x is not None:
sum_sqrs = np.sum(self.objective_function(fitparams, *farg)**2)
dof = len(y) - len(init_values)
self.fit_info['param_cov'] = cov_x * sum_sqrs / dof
else:
self.fit_info['param_cov'] = None
return model_copy
@staticmethod
def _wrap_deriv(params, model, weights, x, y, z=None):
"""
Wraps the method calculating the Jacobian of the function to account
for model constraints.
`scipy.optimize.leastsq` expects the function derivative to have the
above signature (parlist, (argtuple)). In order to accommodate model
constraints, instead of using p directly, we set the parameter list in
this function.
"""
if weights is None:
weights = 1.0
if any(model.fixed.values()) or any(model.tied.values()):
# update the parameters with the current values from the fitter
_fitter_to_model_params(model, params)
if z is None:
full = np.array(model.fit_deriv(x, *model.parameters))
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
else:
full = np.array([np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)])
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
pars = [getattr(model, name) for name in model.param_names]
fixed = [par.fixed for par in pars]
tied = [par.tied for par in pars]
tied = list(np.where([par.tied is not False for par in pars],
True, tied))
fix_and_tie = np.logical_or(fixed, tied)
ind = np.logical_not(fix_and_tie)
if not model.col_fit_deriv:
residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
return [np.ravel(_) for _ in residues]
else:
if z is None:
return [np.ravel(_) for _ in np.ravel(weights) * np.array(model.fit_deriv(x, *params))]
else:
if not model.col_fit_deriv:
return [np.ravel(_) for _ in (
np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T).T]
else:
return [np.ravel(_) for _ in (weights * np.array(model.fit_deriv(x, y, *params)))]
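# --- Illustrative usage sketch (hypothetical helper; not part of this module's
# API). Synthetic data are assumed.
def _demo_levmar():
    from astropy.modeling import models
    rng = np.random.RandomState(0)
    x = np.linspace(-5., 5., 200)
    y = 3. * np.exp(-0.5 * (x - 1.3) ** 2 / 0.8 ** 2)
    y = y + rng.normal(0., 0.05, x.shape)
    fit = LevMarLSQFitter()
    g = fit(models.Gaussian1D(amplitude=1., mean=0., stddev=1.), x, y,
            weights=1.0 / 0.05)
    # 'param_cov' holds the covariance matrix scaled by the residuals:
    return g.parameters, fit.fit_info['param_cov']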
class SLSQPLSQFitter(Fitter):
"""
SLSQP optimization algorithm and least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
"""
supported_constraints = SLSQP.supported_constraints
def __init__(self):
super().__init__(optimizer=SLSQP, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
verblevel : int
0-silent
1-print summary upon completion,
2-print summary after each iteration
maxiter : int
maximum number of iterations
epsilon : float
the step size for finite-difference derivative estimates
acc : float
Requested accuracy
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
p0, _ = _model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, p0, farg, **kwargs)
_fitter_to_model_params(model_copy, fitparams)
return model_copy
class SimplexLSQFitter(Fitter):
"""
Simplex algorithm and least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
"""
supported_constraints = Simplex.supported_constraints
def __init__(self):
super().__init__(optimizer=Simplex, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
maxiter : int
maximum number of iterations
acc : float
Relative error in approximate solution
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model,
self._opt_method.supported_constraints)
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
p0, _ = _model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, p0, farg, **kwargs)
_fitter_to_model_params(model_copy, fitparams)
return model_copy
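# --- Illustrative usage sketch (hypothetical helper; not part of this module's
# API). The two classes above only swap the optimizer; the statistic and the
# objective_function machinery come from the shared ``Fitter`` base class.
def _demo_gradient_free_fitters():
    from astropy.modeling import models
    x = np.linspace(-3., 3., 100)
    y = 2. * np.exp(-0.5 * x ** 2)
    g_slsqp = SLSQPLSQFitter()(models.Gaussian1D(1., 0., 1.), x, y)
    g_simplex = SimplexLSQFitter()(models.Gaussian1D(1., 0., 1.), x, y)
    return g_slsqp.parameters, g_simplex.parameters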
class JointFitter(metaclass=_FitterMeta):
"""
Fit models which share a parameter.
For example, fit two gaussians to two data sets but keep
the FWHM the same.
Parameters
----------
models : list
a list of model instances
jointparameters : list
a list of joint parameters
initvals : list
a list of initial values
"""
def __init__(self, models, jointparameters, initvals):
self.models = list(models)
self.initvals = list(initvals)
self.jointparams = jointparameters
self._verify_input()
self.fitparams = self._model_to_fit_params()
# a list of model.n_inputs
self.modeldims = [m.n_inputs for m in self.models]
# sum all model dimensions
self.ndim = np.sum(self.modeldims)
def _model_to_fit_params(self):
fparams = []
fparams.extend(self.initvals)
for model in self.models:
params = [p.flatten() for p in model.parameters]
joint_params = self.jointparams[model]
param_metrics = model._param_metrics
for param_name in joint_params:
slice_ = param_metrics[param_name]['slice']
del params[slice_]
fparams.extend(params)
return fparams
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
            the fitted parameters - result of one iteration of the
fitting algorithm
args : dict
tuple of measured and input coordinates
args is always passed as a tuple from optimize.leastsq
"""
lstsqargs = list(args)
fitted = []
fitparams = list(fps)
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fitparams[:numjp]
del fitparams[:numjp]
for model in self.models:
joint_params = self.jointparams[model]
margs = lstsqargs[:model.n_inputs + 1]
del lstsqargs[:model.n_inputs + 1]
# separate out each model's individually fitted parameters
numfp = len(model._parameters) - len(joint_params)
mfparams = fitparams[:numfp]
del fitparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the
# parameter is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
modelfit = model.evaluate(margs[:-1], *mparams)
fitted.extend(modelfit - margs[-1])
return np.ravel(fitted)
def _verify_input(self):
if len(self.models) <= 1:
raise TypeError("Expected >1 models, {} is given".format(
len(self.models)))
if len(self.jointparams.keys()) < 2:
raise TypeError("At least two parameters are expected, "
"{} is given".format(len(self.jointparams.keys())))
for j in self.jointparams.keys():
if len(self.jointparams[j]) != len(self.initvals):
raise TypeError("{} parameter(s) provided but {} expected".format(
len(self.jointparams[j]), len(self.initvals)))
def __call__(self, *args):
"""
Fit data to these models keeping some of the parameters common to the
two models.
"""
from scipy import optimize
if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims):
raise ValueError("Expected {} coordinates in args but {} provided"
.format(reduce(lambda x, y: x + 1 + y + 1,
self.modeldims), len(args)))
self.fitparams[:], _ = optimize.leastsq(self.objective_function,
self.fitparams, args=args)
fparams = self.fitparams[:]
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fparams[:numjp]
del fparams[:numjp]
for model in self.models:
# extract each model's fitted parameters
joint_params = self.jointparams[model]
numfp = len(model._parameters) - len(joint_params)
mfparams = fparams[:numfp]
del fparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the parameter
# is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
model.parameters = np.array(mparams)
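# Illustrative sketch (not executed here): jointly fitting two Gaussians that
# share their ``mean``. ``x1, y1, x2, y2`` are assumed 1-D data arrays, one
# (x, y) pair per model, passed in the order of the ``models`` list.
#
#     from astropy.modeling import models
#
#     g1 = models.Gaussian1D(amplitude=10., mean=14.9, stddev=.3)
#     g2 = models.Gaussian1D(amplitude=10., mean=13., stddev=.4)
#     jf = JointFitter([g1, g2],
#                      jointparameters={g1: ['mean'], g2: ['mean']},
#                      initvals=[14.8])
#     jf(x1, y1, x2, y2)  # updates g1 and g2 in place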
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1:
if z is None:
if y.shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (the size of the y array along "
"model_set_axis) is expected to equal the number of "
"parameter sets")
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
else:
# Shape of z excluding model_set_axis
z_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1:]
if not (x.shape == y.shape == z_shape):
raise ValueError("x, y and z should have the same shape")
if z is None:
farg = (x, y)
else:
farg = (x, y, z)
return farg
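# Illustrative sketch (not executed here) of the layout _convert_input
# produces for a model set: the model-set axis of ``y`` is rolled to the end
# so that each model corresponds to one column, as numpy.linalg.lstsq expects.
#
#     x = np.linspace(0., 1., 10)              # shape (10,)
#     y = np.random.rand(3, 10)                # 3 models along axis 0
#     x, y = _convert_input(x, y, n_models=3, model_set_axis=0)
#     y.shape                                  # -> (10, 3)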
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def _fitter_to_model_params(model, fps):
"""
Constructs the full list of model parameters from the fitted and
constrained parameters.
"""
_, fit_param_indices = _model_to_fit_params(model)
has_tied = any(model.tied.values())
has_fixed = any(model.fixed.values())
has_bound = any(b != (None, None) for b in model.bounds.values())
if not (has_tied or has_fixed or has_bound):
# We can just assign directly
model.parameters = fps
return
fit_param_indices = set(fit_param_indices)
offset = 0
param_metrics = model._param_metrics
for idx, name in enumerate(model.param_names):
if idx not in fit_param_indices:
continue
slice_ = param_metrics[name]['slice']
shape = param_metrics[name]['shape']
# This is determining which range of fps (the fitted parameters) maps
# to parameters of the model
size = reduce(operator.mul, shape, 1)
values = fps[offset:offset + size]
# Check bounds constraints
if model.bounds[name] != (None, None):
_min, _max = model.bounds[name]
if _min is not None:
values = np.fmax(values, _min)
if _max is not None:
values = np.fmin(values, _max)
model.parameters[slice_] = values
offset += size
# This has to be done in a separate loop due to how tied parameters are
# currently evaluated (the fitted parameters need to actually be *set* on
# the model first, for use in evaluating the "tied" expression--it might be
# better to change this at some point).
if has_tied:
for idx, name in enumerate(model.param_names):
if model.tied[name]:
value = model.tied[name](model)
slice_ = param_metrics[name]['slice']
model.parameters[slice_] = value
def _model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
The returned parameter values may be a subset of the model's parameters,
if some of them are held constant or tied.
"""
fitparam_indices = list(range(len(model.param_names)))
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model.parameters)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]['slice']
del params[slice_]
del fitparam_indices[idx]
return (np.array(params), fitparam_indices)
else:
return (model.parameters, fitparam_indices)
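# Illustrative sketch (not executed here): round trip through the two helpers
# above for a model with a fixed parameter.
#
#     from astropy.modeling import models
#
#     g = models.Gaussian1D(amplitude=2., mean=0., stddev=1.,
#                           fixed={'mean': True})
#     _model_to_fit_params(g)          # -> (array([2., 1.]), [0, 2])
#     _fitter_to_model_params(g, [3., 0.5])
#     g.parameters                     # -> [3., 0., 0.5]; fixed mean untouched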
def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = 'Optimizer cannot handle {0} constraints.'
if (any(model.fixed.values()) and
'fixed' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('fixed parameter'))
if any(model.tied.values()) and 'tied' not in supported_constraints:
raise UnsupportedConstraintError(
message.format('tied parameter'))
if (any(tuple(b) != (None, None) for b in model.bounds.values()) and
'bounds' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('bound parameter'))
if model.eqcons and 'eqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('equality'))
if model.ineqcons and 'ineqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('inequality'))
def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn('Model is linear in parameters; '
'consider using linear fitting methods.',
AstropyUserWarning)
elif len(model) != 1:
# for now only single data sets can be fitted
raise ValueError("Non-linear fitters can only fit "
"one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy
def populate_entry_points(entry_points):
"""
This injects entry points into the `astropy.modeling.fitting` namespace.
This provides a means of inserting a fitting routine without requiring
it to be merged into astropy's core.
Parameters
----------
entry_points : list of `~pkg_resources.EntryPoint`
Entry points are objects that encapsulate
importable objects and are defined when
a package is installed.
Notes
-----
An explanation of entry points can be found `here <http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
"""
for entry_point in entry_points:
name = entry_point.name
try:
entry_point = entry_point.load()
except Exception as e:
# This stops the fitting from choking if an entry_point produces an error.
warnings.warn(AstropyUserWarning('{type} error occurred in entry '
'point {name}.'.format(type=type(e).__name__, name=name)))
else:
if not inspect.isclass(entry_point):
warnings.warn(AstropyUserWarning(
'Modeling entry point {0} expected to be a '
'Class.'.format(name)))
else:
if issubclass(entry_point, Fitter):
name = entry_point.__name__
globals()[name] = entry_point
__all__.append(name)
else:
warnings.warn(AstropyUserWarning(
'Modeling entry point {0} expected to extend '
'astropy.modeling.Fitter'.format(name)))
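# Illustrative sketch (not executed here): a third-party package could expose
# a fitter via the ``astropy.modeling`` entry-point group in its setup.py; the
# package and class names below are assumptions for the example only.
#
#     setup(...,
#           entry_points={'astropy.modeling':
#                         ['MyFitter = mypackage.fitters:MyFitter']})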
# this is so fitting doesn't choke if pkg_resources doesn't exist
if HAS_PKG:
populate_entry_points(iter_entry_points(group='astropy.modeling', name=None))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines base classes for all models. The base class of all
models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is
the base class for all fittable models. Fittable models can be linear or
nonlinear in a regression analysis sense.
All models provide a `__call__` method which performs the transformation in
a purely mathematical way, i.e. the models are unitless. Model instances can
represent either a single model, or a "model set" representing multiple copies
of the same type of model, but with potentially different values of the
parameters in each model making up the set.
"""
import abc
import copy
import copyreg
import inspect
import functools
import operator
import types
import warnings
from collections import defaultdict, OrderedDict
from contextlib import suppress
from inspect import signature
from itertools import chain, islice
from functools import partial
import numpy as np
from ..utils import indent, metadata
from ..table import Table
from ..units import Quantity, UnitsError, dimensionless_unscaled
from ..units.utils import quantity_asanyarray
from ..utils import (sharedmethod, find_current_module,
InheritDocstrings, OrderedDescriptorContainer,
check_broadcast, IncompatibleShapeError, isiterable)
from ..utils.codegen import make_function_with_signature
from ..utils.exceptions import AstropyDeprecationWarning
from .utils import (combine_labels, make_binary_operator_eval,
ExpressionTree, AliasDict, get_inputs_and_params,
_BoundingBox, _combine_equivalency_dict)
from ..nddata.utils import add_array, extract_array
from .parameters import Parameter, InputParameterError, param_repr_oneline
__all__ = ['Model', 'FittableModel', 'Fittable1DModel', 'Fittable2DModel',
'custom_model', 'ModelDefinitionError']
class ModelDefinitionError(TypeError):
"""Used for incorrect models definitions"""
def _model_oper(oper, **kwargs):
"""
Returns a function that evaluates a given Python arithmetic operator
between two models. The operator should be given as a string, like ``'+'``
or ``'**'``.
Any additional keyword arguments passed in are passed to
`_CompoundModelMeta._from_operator`.
"""
# Note: Originally this used functools.partial, but that won't work when
# used in the class definition of _CompoundModelMeta since
# _CompoundModelMeta has not been defined yet.
# Perform an arithmetic operation on two models.
return lambda left, right: _CompoundModelMeta._from_operator(oper, left,
right, **kwargs)
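# Illustrative sketch (not executed here): the operator functions built by
# _model_oper back the arithmetic operators on model classes, so compound
# model classes can be composed directly:
#
#     from astropy.modeling import models
#
#     TwoGaussians = models.Gaussian1D + models.Gaussian1D   # uses __add__
#     ShiftAndScale = models.Shift | models.Scale            # uses __or__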
class _ModelMeta(OrderedDescriptorContainer, InheritDocstrings, abc.ABCMeta):
"""
Metaclass for Model.
Currently just handles auto-generating the param_names list based on
Parameter descriptors declared at the class-level of Model subclasses.
"""
_is_dynamic = False
"""
This flag signifies whether this class was created in the "normal" way,
with a class statement in the body of a module, as opposed to a call to
`type` or some other metaclass constructor, such that the resulting class
does not belong to a specific module. This is important for pickling of
dynamic classes.
This flag is always forced to False for new classes, so code that creates
dynamic classes should manually set it to True on those classes when
creating them.
"""
# Default empty dict for _parameters_, which will be empty on model
# classes that don't have any Parameters
_parameters_ = OrderedDict()
def __new__(mcls, name, bases, members):
# See the docstring for _is_dynamic above
if '_is_dynamic' not in members:
members['_is_dynamic'] = mcls._is_dynamic
return super().__new__(mcls, name, bases, members)
def __init__(cls, name, bases, members):
# Make sure OrderedDescriptorContainer gets to run before doing
# anything else
super().__init__(name, bases, members)
if cls._parameters_:
if hasattr(cls, '_param_names'):
# Slight kludge to support compound models, where
# cls.param_names is a property; could be improved with a
# little refactoring but fine for now
cls._param_names = tuple(cls._parameters_)
else:
cls.param_names = tuple(cls._parameters_)
cls._create_inverse_property(members)
cls._create_bounding_box_property(members)
cls._handle_special_methods(members)
def __repr__(cls):
"""
Custom repr for Model subclasses.
"""
return cls._format_cls_repr()
def _repr_pretty_(cls, p, cycle):
"""
Repr for IPython's pretty printer.
By default IPython "pretty prints" classes, so we need to implement
this so that IPython displays the custom repr for Models.
"""
p.text(repr(cls))
def __reduce__(cls):
if not cls._is_dynamic:
# Just return a string specifying where the class can be imported
# from
return cls.__name__
else:
members = dict(cls.__dict__)
# Delete any ABC-related attributes--these will be restored when
# the class is reconstructed:
for key in list(members):
if key.startswith('_abc_'):
del members[key]
# Delete custom __init__ and __call__ if they exist:
for key in ('__init__', '__call__'):
if key in members:
del members[key]
return (type(cls), (cls.__name__, cls.__bases__, members))
@property
def name(cls):
"""
The name of this model class--equivalent to ``cls.__name__``.
This attribute is provided for symmetry with the `Model.name` attribute
of model instances.
"""
return cls.__name__
@property
def n_inputs(cls):
return len(cls.inputs)
@property
def n_outputs(cls):
return len(cls.outputs)
@property
def _is_concrete(cls):
"""
A class-level property that determines whether the class is a concrete
implementation of a Model--i.e. it is not some abstract base class or
internal implementation detail (i.e. begins with '_').
"""
return not (cls.__name__.startswith('_') or inspect.isabstract(cls))
def rename(cls, name):
"""
Creates a copy of this model class with a new name.
The new class is technically a subclass of the original class, so that
instance and type checks will still work. For example::
>>> from astropy.modeling.models import Rotation2D
>>> SkyRotation = Rotation2D.rename('SkyRotation')
>>> SkyRotation
<class '__main__.SkyRotation'>
Name: SkyRotation (Rotation2D)
Inputs: ('x', 'y')
Outputs: ('x', 'y')
Fittable parameters: ('angle',)
>>> issubclass(SkyRotation, Rotation2D)
True
>>> r = SkyRotation(90)
>>> isinstance(r, Rotation2D)
True
"""
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = '__main__'
new_cls = type(name, (cls,), {})
new_cls.__module__ = modname
if hasattr(cls, '__qualname__'):
if new_cls.__module__ == '__main__':
# __main__ is not added to a class's qualified name
new_cls.__qualname__ = name
else:
new_cls.__qualname__ = '{0}.{1}'.format(modname, name)
return new_cls
def _create_inverse_property(cls, members):
inverse = members.get('inverse')
if inverse is None or cls.__bases__[0] is object:
# The latter clause is to prevent the code below from running on
# the Model base class, which implements the default getter and
# setter for .inverse
return
if isinstance(inverse, property):
# We allow the @property decorator to be omitted entirely from
# the class definition, though its use should be encouraged for
# clarity
inverse = inverse.fget
# Store the inverse getter internally, then delete the given .inverse
# attribute so that cls.inverse resolves to Model.inverse instead
cls._inverse = inverse
del cls.inverse
def _create_bounding_box_property(cls, members):
"""
Takes any bounding_box defined on a concrete Model subclass (either
as a fixed tuple or a property or method) and wraps it in the generic
getter/setter interface for the bounding_box attribute.
"""
# TODO: Much of this is verbatim from _create_inverse_property--I feel
# like there could be a way to generify properties that work this way,
# but for the time being that would probably only confuse things more.
bounding_box = members.get('bounding_box')
if bounding_box is None or cls.__bases__[0] is object:
return
if isinstance(bounding_box, property):
bounding_box = bounding_box.fget
if not callable(bounding_box):
# See if it's a hard-coded bounding_box (as a sequence) and
# normalize it
try:
bounding_box = _BoundingBox.validate(cls, bounding_box)
except ValueError as exc:
raise ModelDefinitionError(exc.args[0])
else:
sig = signature(bounding_box)
# May be a method that only takes 'self' as an argument (like a
# property, but the @property decorator was forgotten)
# TODO: Maybe warn in the above case?
#
# However, if the method takes additional arguments then this is a
# parameterized bounding box and should be callable
if len(sig.parameters) > 1:
bounding_box = \
cls._create_bounding_box_subclass(bounding_box, sig)
# See the Model.bounding_box getter definition for how this attribute
# is used
cls._bounding_box = bounding_box
del cls.bounding_box
def _create_bounding_box_subclass(cls, func, sig):
"""
For Models that take optional arguments for defining their bounding
box, we create a subclass of _BoundingBox with a ``__call__`` method
that supports those additional arguments.
Takes the function's Signature as an argument since that is already
computed in _create_bounding_box_property, so no need to duplicate that
effort.
"""
# TODO: Might be convenient if calling the bounding box also
# automatically sets the _user_bounding_box. So that
#
# >>> model.bounding_box(arg=1)
#
# in addition to returning the computed bbox, also sets it, so that
# it's a shortcut for
#
# >>> model.bounding_box = model.bounding_box(arg=1)
#
# Not sure if that would be non-obvious / confusing though...
def __call__(self, **kwargs):
return func(self._model, **kwargs)
kwargs = []
for idx, param in enumerate(sig.parameters.values()):
if idx == 0:
# Presumed to be a 'self' argument
continue
if param.default is param.empty:
raise ModelDefinitionError(
'The bounding_box method for {0} is not correctly '
'defined: If defined as a method all arguments to that '
'method (besides self) must be keyword arguments with '
'default values that can be used to compute a default '
'bounding box.'.format(cls.name))
kwargs.append((param.name, param.default))
__call__ = make_function_with_signature(__call__, ('self',), kwargs)
return type(str('_{0}BoundingBox'.format(cls.name)), (_BoundingBox,),
{'__call__': __call__})
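# Illustrative sketch (not executed here): a bounding_box method with keyword
# arguments, as handled above. Gaussian1D defines such a method (with a
# ``factor`` keyword, default 5.5), so:
#
#     from astropy.modeling.models import Gaussian1D
#
#     g = Gaussian1D(mean=0., stddev=2.)
#     g.bounding_box              # default box, computed with factor=5.5
#     g.bounding_box(factor=2.)   # recomputed with the given factor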
def _handle_special_methods(cls, members):
# Handle init creation from inputs
def update_wrapper(wrapper, cls):
# Set up the new __call__'s metadata attributes as though it were
# manually defined in the class definition
# A bit like functools.update_wrapper but uses the class instead of
# the wrapped function
wrapper.__module__ = cls.__module__
wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__
if hasattr(cls, '__qualname__'):
wrapper.__qualname__ = '{0}.{1}'.format(
cls.__qualname__, wrapper.__name__)
if ('__call__' not in members and 'inputs' in members and
isinstance(members['inputs'], tuple)):
# Don't create a custom __call__ for classes that already have one
# explicitly defined (this includes the Model base class, and any
# other classes that manually override __call__)
def __call__(self, *inputs, **kwargs):
"""Evaluate this model on the supplied inputs."""
return super(cls, self).__call__(*inputs, **kwargs)
# When called, models can take two optional keyword arguments:
#
# * model_set_axis, which indicates (for multi-dimensional input)
# which axis is used to indicate different models
#
# * equivalencies, a dictionary of equivalencies to be applied to
# the input values, where each key should correspond to one of
# the inputs.
#
# The following code creates the __call__ function with these
# two keyword arguments.
inputs = members['inputs']
args = ('self',) + inputs
new_call = make_function_with_signature(
__call__, args, [('model_set_axis', None),
('with_bounding_box', False),
('fill_value', np.nan),
('equivalencies', None)])
# The following makes it look like __call__ was defined in the class
update_wrapper(new_call, cls)
cls.__call__ = new_call
if ('__init__' not in members and not inspect.isabstract(cls) and
cls._parameters_):
# If *all* the parameters have default values we can make them
# keyword arguments; otherwise they must all be positional arguments
if all(p.default is not None for p in cls._parameters_.values()):
args = ('self',)
kwargs = []
for param_name in cls.param_names:
default = cls._parameters_[param_name].default
unit = cls._parameters_[param_name].unit
# If the unit was specified in the parameter but the default
# is not a Quantity, attach the unit to the default.
if unit is not None:
default = Quantity(default, unit, copy=False)
kwargs.append((param_name, default))
else:
args = ('self',) + cls.param_names
kwargs = {}
def __init__(self, *params, **kwargs):
return super(cls, self).__init__(*params, **kwargs)
new_init = make_function_with_signature(
__init__, args, kwargs, varkwargs='kwargs')
update_wrapper(new_init, cls)
cls.__init__ = new_init
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper('+')
__sub__ = _model_oper('-')
__mul__ = _model_oper('*')
__truediv__ = _model_oper('/')
__pow__ = _model_oper('**')
__or__ = _model_oper('|')
__and__ = _model_oper('&')
# *** Other utilities ***
def _format_cls_repr(cls, keywords=[]):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# For the sake of familiarity start the output with the standard class
# __repr__
parts = [super().__repr__()]
if not cls._is_concrete:
return parts[0]
def format_inheritance(cls):
bases = []
for base in cls.mro()[1:]:
if not issubclass(base, Model):
continue
elif (inspect.isabstract(base) or
base.__name__.startswith('_')):
break
bases.append(base.name)
if bases:
return '{0} ({1})'.format(cls.name, ' -> '.join(bases))
else:
return cls.name
try:
default_keywords = [
('Name', format_inheritance(cls)),
('Inputs', cls.inputs),
('Outputs', cls.outputs),
]
if cls.param_names:
default_keywords.append(('Fittable parameters',
cls.param_names))
for keyword, value in default_keywords + keywords:
if value is not None:
parts.append('{0}: {1}'.format(keyword, value))
return '\n'.join(parts)
except Exception:
# If any of the above formatting fails fall back on the basic repr
# (this is particularly useful in debugging)
return parts[0]
class Model(metaclass=_ModelMeta):
"""
Base class for all models.
This is an abstract class and should not be instantiated directly.
This class sets the constraints and other properties for all individual
parameters and performs parameter validation.
The following initialization arguments apply to the majority of Model
subclasses by default (exceptions include specialized utility models
like `~astropy.modeling.mappings.Mapping`). Parametric models take all
their parameters as arguments, followed by any of the following optional
keyword arguments:
Parameters
----------
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict, optional
An optional dict of user-defined metadata to attach to this model.
How this is used and interpreted is up to the user or individual use
case.
n_models : int, optional
If given an integer greater than 1, a *model set* is instantiated
instead of a single model. This affects how the parameter arguments
are interpreted. In this case each parameter must be given as a list
or array--elements of this array are taken along the first axis (or
``model_set_axis`` if specified), such that the Nth element is the
value of that parameter for the Nth model in the set.
See the section on model sets in the documentation for more details.
model_set_axis : int, optional
This argument only applies when creating a model set (i.e. ``n_models >
1``). It changes how parameter values are interpreted. Normally the
first axis of each input parameter array (properly the 0th axis) is
taken as the axis corresponding to the model sets. However, any axis
of an input array may be taken as this "model set axis". This accepts
negative integers as well--for example use ``model_set_axis=-1`` if the
last (most rapidly changing) axis should be associated with the model
sets. Also, ``model_set_axis=False`` can be used to tell that a given
input should be used to evaluate all the models in the model set.
fixed : dict, optional
Dictionary ``{parameter_name: bool}`` setting the fixed constraint
for one or more parameters. `True` means the parameter is held fixed
during fitting and is prevented from updates once an instance of the
model has been created.
Alternatively the `~astropy.modeling.Parameter.fixed` property of a
parameter may be used to lock or unlock individual parameters.
tied : dict, optional
Dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship.
Alternatively the `~astropy.modeling.Parameter.tied` property of a
parameter may be used to set the ``tied`` constraint on individual
parameters.
bounds : dict, optional
A dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list or a tuple
of length 2 giving the desired range for the parameter.
Alternatively the `~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` or
`~astropy.modeling.Parameter.bounds` properties of a parameter may be
used to set bounds on individual parameters.
eqcons : list, optional
List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``
in a successfully optimized problem.
ineqcons : list, optional
List of functions of length n such that ``ineqcons[j](x0, *args) >=
0.0`` in a successfully optimized problem.
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that ``'mean'`` is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
"""
parameter_constraints = Parameter.constraints
"""
Primarily for informational purposes, these are the types of constraints
that can be set on a model's parameters.
"""
model_constraints = ('eqcons', 'ineqcons')
"""
Primarily for informational purposes, these are the types of constraints
that constrain model evaluation.
"""
param_names = ()
"""
Names of the parameters that describe models of this type.
The parameters in this tuple are in the same order they should be passed in
when initializing a model of a specific type. Some types of models, such
as polynomial models, have a different number of parameters depending on
some other property of the model, such as the degree.
When defining a custom model class the value of this attribute is
automatically set by the `~astropy.modeling.Parameter` attributes defined
in the class body.
"""
inputs = ()
"""The name(s) of the input variable(s) on which a model is evaluated."""
outputs = ()
"""The name(s) of the output(s) of the model."""
standard_broadcasting = True
fittable = False
linear = True
_separable = None
""" A boolean flag to indicate whether a model is separable."""
meta = metadata.MetaData()
"""A dict-like object to store optional information."""
# By default models either use their own inverse property or have no
# inverse at all, but users may also assign a custom inverse to a model,
# optionally; in that case it is of course up to the user to determine
# whether their inverse is *actually* an inverse to the model they assign
# it to.
_inverse = None
_user_inverse = None
_bounding_box = None
_user_bounding_box = None
# Default n_models attribute, so that __len__ is still defined even when a
# model hasn't completed initialization yet
_n_models = 1
# New classes can set this as a boolean value.
# It is converted to a dictionary mapping input name to a boolean value.
_input_units_strict = False
# Allow dimensionless input (and corresponding output). If this is True,
# input values to evaluate will gain the units specified in input_units. If
# this is a dictionary then it should map input name to a bool to allow
# dimensionless numbers for that input.
# Only has an effect if input_units is defined.
_input_units_allow_dimensionless = False
# Default equivalencies to apply to input values. If set, this should be a
# dictionary where each key is a string that corresponds to one of the
# model inputs. Only has an effect if input_units is defined.
input_units_equivalencies = None
def __init__(self, *args, meta=None, name=None, **kwargs):
super().__init__()
if meta is not None:
self.meta = meta
self._name = name
self._initialize_constraints(kwargs)
# Remaining keyword args are either parameter values or invalid
# Parameter values must be passed in as keyword arguments in order to
# distinguish them
self._initialize_parameters(args, kwargs)
self._initialize_unit_support()
def _initialize_unit_support(self):
"""
Convert self._input_units_strict and
self.input_units_allow_dimensionless to dictionaries
mapping input name to a boolean value.
"""
if isinstance(self._input_units_strict, bool):
self._input_units_strict = {key: self._input_units_strict for
key in self.__class__.inputs}
if isinstance(self._input_units_allow_dimensionless, bool):
self._input_units_allow_dimensionless = {key: self._input_units_allow_dimensionless
for key in self.__class__.inputs}
@property
def input_units_strict(self):
"""
Enforce strict units on inputs to evaluate. If this is set to True,
input values to evaluate will be in the exact units specified by
input_units. If the input quantities are convertible to input_units,
they are converted. If this is a dictionary then it should map input
name to a bool to set strict input units for that parameter.
"""
val = self._input_units_strict
if isinstance(val, bool):
return {key: val for key in self.__class__.inputs}
else:
return val
@property
def input_units_allow_dimensionless(self):
"""
Allow dimensionless input (and corresponding output). If this is True,
input values to evaluate will gain the units specified in input_units. If
this is a dictionary then it should map input name to a bool to allow
dimensionless numbers for that input.
Only has an effect if input_units is defined.
"""
val = self._input_units_allow_dimensionless
if isinstance(val, bool):
return {key: val for key in self.__class__.inputs}
else:
return val
@property
def uses_quantity(self):
"""
True if this model has been created with `~astropy.units.Quantity`
objects or if there are no parameters.
This can be used to determine if this model should be evaluated with
`~astropy.units.Quantity` or regular floats.
"""
pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)]
return (len(pisq) == 0) or any(pisq)
def __repr__(self):
return self._format_repr()
def __str__(self):
return self._format_str()
def __len__(self):
return self._n_models
def __call__(self, *inputs, **kwargs):
"""
Evaluate this model using the given input(s) and the parameter values
that were specified when the model was instantiated.
"""
inputs, format_info = self.prepare_inputs(*inputs, **kwargs)
parameters = self._param_sets(raw=True, units=True)
with_bbox = kwargs.pop('with_bounding_box', False)
fill_value = kwargs.pop('fill_value', np.nan)
bbox = None
if with_bbox:
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
if self.n_inputs > 1 and bbox is not None:
# bounding_box is in python order - convert it to the order of the inputs
bbox = bbox[::-1]
if bbox is None:
outputs = self.evaluate(*chain(inputs, parameters))
else:
if self.n_inputs == 1:
bbox = [bbox]
# indices where input is outside the bbox
# have a value of 1 in ``nan_ind``
nan_ind = np.zeros(inputs[0].shape, dtype=bool)
for ind, inp in enumerate(inputs):
# Pass an ``out`` array so that ``axis_ind`` is array for scalars as well.
axis_ind = np.zeros(inp.shape, dtype=bool)
axis_ind = np.logical_or(inp < bbox[ind][0], inp > bbox[ind][1], out=axis_ind)
nan_ind[axis_ind] = 1
# get an array with indices of valid inputs
valid_ind = np.logical_not(nan_ind).nonzero()
# inputs holds only inputs within the bbox
args = []
for input in inputs:
if not input.shape:
# shape is ()
if nan_ind:
outputs = [fill_value for a in args]
else:
args.append(input)
else:
args.append(input[valid_ind])
valid_result = self.evaluate(*chain(args, parameters))
if self.n_outputs == 1:
valid_result = [valid_result]
# combine the valid results with the ``fill_value`` values
# outside the bbox
result = [np.zeros(inputs[0].shape) + fill_value for i in range(len(valid_result))]
for ind, r in enumerate(valid_result):
if not result[ind].shape:
# shape is ()
result[ind] = r
else:
result[ind][valid_ind] = r
# format output
if self.n_outputs == 1:
outputs = np.asarray(result[0])
else:
outputs = [np.asarray(r) for r in result]
else:
outputs = self.evaluate(*chain(inputs, parameters))
if self.n_outputs == 1:
outputs = (outputs,)
outputs = self.prepare_outputs(format_info, *outputs, **kwargs)
outputs = self._process_output_units(inputs, outputs)
if self.n_outputs == 1:
return outputs[0]
else:
return outputs
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper('+')
__sub__ = _model_oper('-')
__mul__ = _model_oper('*')
__truediv__ = _model_oper('/')
__pow__ = _model_oper('**')
__or__ = _model_oper('|')
__and__ = _model_oper('&')
# *** Properties ***
@property
def name(self):
"""User-provided name for this model instance."""
return self._name
@name.setter
def name(self, val):
"""Assign a (new) name to this model."""
self._name = val
@property
def n_inputs(self):
"""
The number of inputs to this model.
Equivalent to ``len(model.inputs)``.
"""
return len(self.inputs)
@property
def n_outputs(self):
"""
The number of outputs from this model.
Equivalent to ``len(model.outputs)``.
"""
return len(self.outputs)
@property
def model_set_axis(self):
"""
The index of the model set axis--that is the axis of a parameter array
that pertains to which model a parameter value pertains to--as
specified when the model was initialized.
See the documentation on `Model Sets
<http://docs.astropy.org/en/stable/modeling/models.html#model-sets>`_
for more details.
"""
return self._model_set_axis
@property
def param_sets(self):
"""
Return parameters as a pset.
This is a list with one item per parameter, where each item is an array
of that parameter's values across all parameter sets, with the last axis
associated with the parameter set.
"""
return self._param_sets()
@property
def parameters(self):
"""
A flattened array of all parameter values in all parameter sets.
Fittable parameters maintain this list and fitters modify it.
"""
# Currently the sequence of a model's parameters must be contiguous
# within the _parameters array (which may be a view of a larger array,
# for example when taking a sub-expression of a compound model), so
# the assumption here is reliable:
if not self.param_names:
# Trivial, but not unheard of
return self._parameters
start = self._param_metrics[self.param_names[0]]['slice'].start
stop = self._param_metrics[self.param_names[-1]]['slice'].stop
return self._parameters[start:stop]
@parameters.setter
def parameters(self, value):
"""
Assigning to this attribute updates the parameters array rather than
replacing it.
"""
if not self.param_names:
return
start = self._param_metrics[self.param_names[0]]['slice'].start
stop = self._param_metrics[self.param_names[-1]]['slice'].stop
try:
value = np.array(value).flatten()
self._parameters[start:stop] = value
except ValueError as e:
raise InputParameterError(
"Input parameter values not compatible with the model "
"parameters array: {0}".format(e))
@property
def fixed(self):
"""
A `dict` mapping parameter names to their fixed constraint.
"""
return self._constraints['fixed']
@property
def tied(self):
"""
A `dict` mapping parameter names to their tied constraint.
"""
return self._constraints['tied']
@property
def bounds(self):
"""
A `dict` mapping parameter names to their upper and lower bounds as
``(min, max)`` tuples or ``[min, max]`` lists.
"""
return self._constraints['bounds']
@property
def eqcons(self):
"""List of parameter equality constraints."""
return self._constraints['eqcons']
@property
def ineqcons(self):
"""List of parameter inequality constraints."""
return self._constraints['ineqcons']
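# Illustrative sketch (not executed here): the per-parameter constraint
# dictionaries exposed by the properties above.
#
#     from astropy.modeling import models
#
#     g = models.Gaussian1D(1., 0., 1.)
#     g.fixed                  # -> {'amplitude': False, 'mean': False,
#                              #     'stddev': False}
#     g.fixed['mean'] = True   # equivalent to g.mean.fixed = True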
@property
def inverse(self):
"""
Returns a new `~astropy.modeling.Model` instance which performs the
inverse transform, if an analytic inverse is defined for this model.
Even on models that don't have an inverse defined, this property can be
set with a manually-defined inverse, such as a pre-computed or
experimentally determined inverse (often given as a
`~astropy.modeling.polynomial.PolynomialModel`, but not by
requirement).
A custom inverse can be deleted with ``del model.inverse``. In this
case the model's inverse is reset to its default, if a default exists
(otherwise the default is to raise `NotImplementedError`).
Note to authors of `~astropy.modeling.Model` subclasses: To define an
inverse for a model simply override this property to return the
appropriate model representing the inverse. The machinery that will
make the inverse manually-overridable is added automatically by the
base class.
"""
if self._user_inverse is not None:
return self._user_inverse
elif self._inverse is not None:
return self._inverse()
raise NotImplementedError("An analytical inverse transform has not "
"been implemented for this model.")
@inverse.setter
def inverse(self, value):
if not isinstance(value, (Model, type(None))):
raise ValueError(
"The ``inverse`` attribute may be assigned a `Model` "
"instance or `None` (where `None` explicitly forces the "
"model to have no inverse.")
self._user_inverse = value
@inverse.deleter
def inverse(self):
"""
Resets the model's inverse to its default (if one exists, otherwise
the model will have no inverse).
"""
del self._user_inverse
@property
def has_user_inverse(self):
"""
A flag indicating whether or not a custom inverse model has been
assigned to this model by a user, via assignment to ``model.inverse``.
"""
return self._user_inverse is not None
@property
def bounding_box(self):
r"""
A `tuple` of length `n_inputs` defining the bounding box limits, or
`None` for no bounding box.
The default limits are given by a ``bounding_box`` property or method
defined in the class body of a specific model. If not defined then
this property just raises `NotImplementedError` by default (but may be
assigned a custom value by a user). ``bounding_box`` can be set
manually to an array-like object of shape ``(model.n_inputs, 2)``. For
further usage, see :ref:`bounding-boxes`
The limits are ordered according to the `numpy` indexing
convention, and are the reverse of the model input order,
e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:
* for 1D: ``(x_low, x_high)``
* for 2D: ``((y_low, y_high), (x_low, x_high))``
* for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``
Examples
--------
Setting the ``bounding_box`` limits for a 1D and 2D model:
>>> from astropy.modeling.models import Gaussian1D, Gaussian2D
>>> model_1d = Gaussian1D()
>>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)
>>> model_1d.bounding_box = (-5, 5)
>>> model_2d.bounding_box = ((-6, 6), (-5, 5))
Setting the bounding_box limits for a user-defined 3D `custom_model`:
>>> from astropy.modeling.models import custom_model
>>> def const3d(x, y, z, amp=1):
... return amp
...
>>> Const3D = custom_model(const3d)
>>> model_3d = Const3D()
>>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))
To reset ``bounding_box`` to its default limits just delete the
user-defined value--this will reset it back to the default defined
on the class:
>>> del model_1d.bounding_box
To disable the bounding box entirely (including the default),
set ``bounding_box`` to `None`:
>>> model_1d.bounding_box = None
>>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "astropy\modeling\core.py", line 980, in bounding_box
"No bounding box is defined for this model (note: the "
NotImplementedError: No bounding box is defined for this model (note:
the bounding box was explicitly disabled for this model; use `del
model.bounding_box` to restore the default bounding box, if one is
defined for this model).
"""
if self._user_bounding_box is not None:
if self._user_bounding_box is NotImplemented:
raise NotImplementedError(
"No bounding box is defined for this model (note: the "
"bounding box was explicitly disabled for this model; "
"use `del model.bounding_box` to restore the default "
"bounding box, if one is defined for this model).")
return self._user_bounding_box
elif self._bounding_box is None:
raise NotImplementedError(
"No bounding box is defined for this model.")
elif isinstance(self._bounding_box, _BoundingBox):
# This typically implies a hard-coded bounding box. This will
# probably be rare, but it is an option
return self._bounding_box
elif isinstance(self._bounding_box, types.MethodType):
return self._bounding_box()
else:
# The only other allowed possibility is that it's a _BoundingBox
# subclass, so we call it with its default arguments and return an
# instance of it (that can be called to recompute the bounding box
# with any optional parameters)
# (In other words, in this case self._bounding_box is a *class*)
bounding_box = self._bounding_box((), _model=self)()
return self._bounding_box(bounding_box, _model=self)
@bounding_box.setter
def bounding_box(self, bounding_box):
"""
Assigns the bounding box limits.
"""
if bounding_box is None:
cls = None
# We use this to explicitly set an unimplemented bounding box (as
# opposed to no user bounding box defined)
bounding_box = NotImplemented
elif (isinstance(self._bounding_box, type) and
issubclass(self._bounding_box, _BoundingBox)):
cls = self._bounding_box
else:
cls = _BoundingBox
if cls is not None:
try:
bounding_box = cls.validate(self, bounding_box)
except ValueError as exc:
raise ValueError(exc.args[0])
self._user_bounding_box = bounding_box
@bounding_box.deleter
def bounding_box(self):
self._user_bounding_box = None
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
@property
def separable(self):
""" A flag indicating whether a model is separable."""
if self._separable is not None:
return self._separable
else:
raise NotImplementedError(
'The "separable" property is not defined for '
'model {}'.format(self.__class__.__name__))
# *** Public methods ***
def without_units_for_data(self, **kwargs):
"""
Return an instance of the model for which the parameter values have been
converted to the right units for the data, then the units have been
stripped away.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters should be converted to are not necessarily
the units of the input data, but are derived from them. Model subclasses
that want fitting to work in the presence of quantities need to define a
_parameter_units_for_data_units method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
"""
model = self.copy()
inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)
for inp in self.inputs if kwargs[inp] is not None}
outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)
for out in self.outputs if kwargs[out] is not None}
parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit)
for name, unit in parameter_units.items():
parameter = getattr(model, name)
if parameter.unit is not None:
parameter.value = parameter.quantity.to(unit).value
parameter._set_unit(None, force=True)
return model
def with_units_from_data(self, **kwargs):
"""
Return an instance of the model which has units for which the parameter
values are compatible with the data units specified.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters will gain are not necessarily the units of
the input data, but are derived from them. Model subclasses that want
fitting to work in the presence of quantities need to define a
_parameter_units_for_data_units method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
"""
model = self.copy()
inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)
for inp in self.inputs if kwargs[inp] is not None}
outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)
for out in self.outputs if kwargs[out] is not None}
parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit)
# We are adding units to parameters that already have a value, but we
# don't want to convert the parameter, just add the unit directly, hence
# the call to _set_unit.
for name, unit in parameter_units.items():
parameter = getattr(model, name)
parameter._set_unit(unit, force=True)
return model
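# Illustrative sketch (not executed here): how a unit-aware fit can use the
# two methods above. ``x_data`` and ``y_data`` are assumed Quantity arrays
# with spectral-axis and flux units.
#
#     import astropy.units as u
#     from astropy.modeling import models
#
#     g = models.Gaussian1D(3. * u.Jy, 1.3 * u.micron, 0.1 * u.micron)
#     g_plain = g.without_units_for_data(x=x_data, y=y_data)
#     # ... fit g_plain with a unit-unaware optimizer ...
#     g_fit = g_plain.with_units_from_data(x=x_data, y=y_data)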
@property
def _has_units(self):
# Returns True if any of the parameters have units
for param in self.param_names:
if getattr(self, param).unit is not None:
return True
else:
return False
@property
def _supports_unit_fitting(self):
# If the model has a '_parameter_units_for_data_units' method, this
# indicates that we have enough information to strip the units away
# and add them back after fitting, when fitting quantities
return hasattr(self, '_parameter_units_for_data_units')
@abc.abstractmethod
def evaluate(self, *args, **kwargs):
"""Evaluate the model on some input variables."""
def sum_of_implicit_terms(self, *args, **kwargs):
"""
Evaluate the sum of any implicit model terms on some input variables.
This includes any fixed terms used in evaluating a linear model that
do not have corresponding parameters exposed to the user. The
prototypical case is `astropy.modeling.functional_models.Shift`, which
corresponds to a function y = a + bx, where b=1 is intrinsically fixed
by the type of model, such that sum_of_implicit_terms(x) == x. This
method is needed by linear fitters to correct the dependent variable
for the implicit term(s) when solving for the remaining terms
(i.e. a = y - bx).
"""
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of the
returned array. If this is not provided (or None), the model will be
evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``out`` or ``coords`` must be passed.
Raises
------
ValueError
If ``coords`` are not given and the `Model.bounding_box` of this
model is not set.
Examples
--------
:ref:`bounding-boxes`
"""
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError('If no bounding_box is set, '
'coords or out must be input.')
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError('inconsistent shape of the output.')
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out, dtype=float)
if out.ndim != ndim:
raise ValueError('the array and model must have the same '
'number of dimensions.')
if bbox is not None:
# ensures position is at center pixel, important when using add_array
pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
for bb in bbox]).astype(int).T
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos)
for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
'The `bounding_box` is larger than the input out in '
'one or more dimensions. Set '
'`model.bounding_box = None`.')
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
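# Illustrative sketch (not executed here): stamping a bounded model into an
# existing image with ``render``.
#
#     import numpy as np
#     from astropy.modeling.models import Gaussian2D
#
#     psf = Gaussian2D(amplitude=1., x_mean=25., y_mean=25.,
#                      x_stddev=2., y_stddev=2.)
#     psf.bounding_box = ((15, 35), (15, 35))  # ((y_low, y_high), (x_low, x_high))
#     image = np.zeros((50, 50))
#     image = psf.render(out=image)            # adds the model within the box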
@property
def input_units(self):
"""
This property is used to indicate what units or sets of units the
evaluate method expects, and returns a dictionary mapping inputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid input units, in which case this property should
not be overridden since it will return the input units based on the
annotations.
"""
if hasattr(self, '_input_units'):
return self._input_units
elif hasattr(self.evaluate, '__annotations__'):
annotations = self.evaluate.__annotations__.copy()
annotations.pop('return', None)
if annotations:
# If annotations are missing for any input this will raise a KeyError.
return dict((name, annotations[name]) for name in self.inputs)
else:
# None means any unit is accepted
return None
@property
def return_units(self):
"""
This property is used to indicate what units or sets of units the output
of evaluate should be in, and returns a dictionary mapping outputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid output units, in which case this property should not be
overridden since it will return the return units based on the
annotations.
"""
if hasattr(self, '_return_units'):
return self._return_units
elif hasattr(self.evaluate, '__annotations__'):
return self.evaluate.__annotations__.get('return', None)
else:
# None means any unit is accepted
return None
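# Illustrative sketch (not executed here): declaring units through
# annotations on ``evaluate``, which the two properties above pick up. The
# model below is an assumption for the example only.
#
#     import astropy.units as u
#
#     class FreqToWavelength(Model):
#         inputs = ('f',)
#         outputs = ('w',)
#
#         @staticmethod
#         def evaluate(f: u.Hz) -> u.m:
#             return f.to(u.m, equivalencies=u.spectral())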
def prepare_inputs(self, *inputs, model_set_axis=None, equivalencies=None,
**kwargs):
"""
This method is used in `~astropy.modeling.Model.__call__` to ensure
that all the inputs to the model can be broadcast into compatible
shapes (if one or both of them are input as arrays), particularly if
there are more than one parameter sets. This also makes sure that (if
applicable) the units of the input will be compatible with the evaluate
method.
"""
# When we instantiate the model class, we make sure that __call__ can
# take the following two keyword arguments: model_set_axis and
# equivalencies.
if model_set_axis is None:
# By default the model_set_axis for the input is assumed to be the
# same as that for the parameters the model was defined with
# TODO: Ensure that negative model_set_axis arguments are respected
model_set_axis = self.model_set_axis
n_models = len(self)
params = [getattr(self, name) for name in self.param_names]
inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]
_validate_input_shapes(inputs, self.inputs, n_models,
model_set_axis, self.standard_broadcasting)
inputs = self._validate_input_units(inputs, equivalencies)
# The input formatting required for single models versus a multiple
# model set are different enough that they've been split into separate
# subroutines
if n_models == 1:
return _prepare_inputs_single_model(self, params, inputs,
**kwargs)
else:
return _prepare_inputs_model_set(self, params, inputs, n_models,
model_set_axis, **kwargs)
def _validate_input_units(self, inputs, equivalencies=None):
inputs = list(inputs)
name = self.name or self.__class__.__name__
# Check that the units are correct, if applicable
if self.input_units is not None:
# We combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(self.inputs,
equivalencies,
self.input_units_equivalencies)
# We now iterate over the different inputs and make sure that their
# units are consistent with those specified in input_units.
for i in range(len(inputs)):
input_name = self.inputs[i]
input_unit = self.input_units.get(input_name, None)
if input_unit is None:
continue
if isinstance(inputs[i], Quantity):
# We check for consistency of the units with input_units,
# taking into account any equivalencies
if inputs[i].unit.is_equivalent(input_unit, equivalencies=input_units_equivalencies[input_name]):
# If equivalencies have been specified, we need to
# convert the input to the input units - this is because
# some equivalencies are non-linear, and we need to be
# sure that we evaluate the model in its own frame
# of reference. If input_units_strict is set, we also
# need to convert to the input units.
if len(input_units_equivalencies) > 0 or self.input_units_strict[input_name]:
inputs[i] = inputs[i].to(input_unit, equivalencies=input_units_equivalencies[input_name])
else:
# We consider the following two cases separately so as
# to be able to raise more appropriate/nicer exceptions
if input_unit is dimensionless_unscaled:
raise UnitsError("{0}: Units of input '{1}', {2} ({3}), could not be "
"converted to required dimensionless "
"input".format(name,
self.inputs[i],
inputs[i].unit,
inputs[i].unit.physical_type))
else:
raise UnitsError("{0}: Units of input '{1}', {2} ({3}), could not be "
"converted to required input units of "
"{4} ({5})".format(name, self.inputs[i],
inputs[i].unit,
inputs[i].unit.physical_type,
input_unit,
input_unit.physical_type))
else:
# If we allow dimensionless input, we add the units to the
# input values without conversion, otherwise we raise an
# exception.
if (not self.input_units_allow_dimensionless[input_name] and
input_unit is not dimensionless_unscaled and input_unit is not None):
if np.any(inputs[i] != 0):
raise UnitsError("{0}: Units of input '{1}', (dimensionless), could not be "
"converted to required input units of "
"{2} ({3})".format(name, self.inputs[i], input_unit,
input_unit.physical_type))
return inputs
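# Sketch of the checks above (u.micron / u.spectral are illustrative): a
# model with input_units = {'x': u.micron} called with a frequency Quantity
# raises UnitsError unless a suitable equivalency is supplied at call time,
# e.g. model(3e14 * u.Hz, equivalencies={'x': u.spectral()}), in which case
# the input is converted to micron before evaluation.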
def _process_output_units(self, inputs, outputs):
inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs])
if self.return_units and inputs_are_quantity:
# We allow a non-iterable unit only if there is one output
if self.n_outputs == 1 and not isiterable(self.return_units):
return_units = {self.outputs[0]: self.return_units}
else:
return_units = self.return_units
outputs = tuple([Quantity(out, return_units.get(out_name, None), subok=True)
for out, out_name in zip(outputs, self.outputs)])
return outputs
def prepare_outputs(self, format_info, *outputs, **kwargs):
model_set_axis = kwargs.get('model_set_axis', None)
if len(self) == 1:
return _prepare_outputs_single_model(self, outputs, format_info)
else:
return _prepare_outputs_model_set(self, outputs, format_info, model_set_axis)
def copy(self):
"""
Return a copy of this model.
Uses a deep copy so that all model attributes, including parameter
values, are copied as well.
"""
return copy.deepcopy(self)
def deepcopy(self):
"""
Return a deep copy of this model.
"""
return copy.deepcopy(self)
@sharedmethod
def rename(self, name):
"""
Return a copy of this model with a new name.
"""
new_model = self.copy()
new_model._name = name
return new_model
@sharedmethod
def n_submodels(self):
"""
Return the number of components in a single (non-compound) model,
which is always 1.
"""
return 1
# *** Internal methods ***
@sharedmethod
def _from_existing(self, existing, param_names):
"""
Creates a new instance of ``cls`` that shares its underlying parameter
values with an existing model instance given by ``existing``.
This is used primarily by compound models to return a view of an
individual component of a compound model. ``param_names`` should be
the names of the parameters in the *existing* model to use as the
parameters in this new model. Its length should equal the number of
parameters this model takes, so that it can map parameters on the
existing model to parameters on this model one-to-one.
"""
# Basically this is an alternative __init__
if isinstance(self, type):
# self is a class, not an instance
needs_initialization = True
dummy_args = (0,) * len(param_names)
self = self.__new__(self, *dummy_args)
else:
needs_initialization = False
self = self.copy()
aliases = dict(zip(self.param_names, param_names))
# This is basically an alternative _initialize_constraints
constraints = {}
for cons_type in self.parameter_constraints:
orig = existing._constraints[cons_type]
constraints[cons_type] = AliasDict(orig, aliases)
self._constraints = constraints
self._n_models = existing._n_models
self._model_set_axis = existing._model_set_axis
self._parameters = existing._parameters
self._param_metrics = defaultdict(dict)
for param_a, param_b in aliases.items():
# Take the param metrics info for the given parameters in the
# existing model, and hand them to the appropriate parameters in
# the new model
self._param_metrics[param_a] = existing._param_metrics[param_b]
if needs_initialization:
self.__init__(*dummy_args)
return self
def _initialize_constraints(self, kwargs):
"""
Pop parameter constraint values off the keyword arguments passed to
`Model.__init__` and store them in private instance attributes.
"""
if hasattr(self, '_constraints'):
# Skip constraint initialization if it has already been handled via
# an alternate initialization
return
self._constraints = {}
# Pop any constraints off the keyword arguments
for constraint in self.parameter_constraints:
values = kwargs.pop(constraint, {})
self._constraints[constraint] = values.copy()
# Update with default parameter constraints
for param_name in self.param_names:
param = getattr(self, param_name)
# Parameters don't have all constraint types
value = getattr(param, constraint)
if value is not None:
self._constraints[constraint][param_name] = value
for constraint in self.model_constraints:
values = kwargs.pop(constraint, [])
self._constraints[constraint] = values
def _initialize_parameters(self, args, kwargs):
"""
Initialize the _parameters array that stores raw parameter values for
all parameter sets for use with vectorized fitting algorithms; on
FittableModels the _param_name attributes actually just reference
slices of this array.
"""
if hasattr(self, '_parameters'):
# Skip parameter initialization if it has already been handled via
# an alternate initialization
return
n_models = kwargs.pop('n_models', None)
if not (n_models is None or
(isinstance(n_models, (int, np.integer)) and n_models >= 1)):
raise ValueError(
"n_models must be either None (in which case it is "
"determined from the model_set_axis of the parameter initial "
"values) or it must be a positive integer "
"(got {0!r})".format(n_models))
model_set_axis = kwargs.pop('model_set_axis', None)
if model_set_axis is None:
if n_models is not None and n_models > 1:
# Default to zero
model_set_axis = 0
else:
# Otherwise disable
model_set_axis = False
else:
if not (model_set_axis is False or
(isinstance(model_set_axis, int) and
not isinstance(model_set_axis, bool))):
raise ValueError(
"model_set_axis must be either False or an integer "
"specifying the parameter array axis to map to each "
"model in a set of models (got {0!r}).".format(
model_set_axis))
# Process positional arguments by matching them up with the
# corresponding parameters in self.param_names--if any also appear as
# keyword arguments this presents a conflict
params = {}
if len(args) > len(self.param_names):
raise TypeError(
"{0}.__init__() takes at most {1} positional arguments ({2} "
"given)".format(self.__class__.__name__, len(self.param_names),
len(args)))
self._model_set_axis = model_set_axis
self._param_metrics = defaultdict(dict)
for idx, arg in enumerate(args):
if arg is None:
# A value of None implies using the default value, if one exists
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
params[self.param_names[idx]] = quantity_asanyarray(arg, dtype=float)
# At this point the only remaining keyword arguments should be
# parameter names; any others are in error.
for param_name in self.param_names:
if param_name in kwargs:
if param_name in params:
raise TypeError(
"{0}.__init__() got multiple values for parameter "
"{1!r}".format(self.__class__.__name__, param_name))
value = kwargs.pop(param_name)
if value is None:
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
params[param_name] = quantity_asanyarray(value, dtype=float)
if kwargs:
# If any keyword arguments were left over at this point they are
# invalid--the base class should only be passed the parameter
# values, constraints, and param_dim
for kwarg in kwargs:
# Just raise an error on the first unrecognized argument
raise TypeError(
'{0}.__init__() got an unrecognized parameter '
'{1!r}'.format(self.__class__.__name__, kwarg))
# Determine the number of model sets: If the model_set_axis is
# False then there is just one parameter set; otherwise it is determined
# by the size of that axis on the first parameter--if the other
# parameters don't have the right number of axes or the sizes of their
# model_set_axis don't match an error is raised
if model_set_axis is not False and n_models != 1 and params:
max_ndim = 0
if model_set_axis < 0:
min_ndim = abs(model_set_axis)
else:
min_ndim = model_set_axis + 1
for name, value in params.items():
param_ndim = np.ndim(value)
if param_ndim < min_ndim:
raise InputParameterError(
"All parameter values must be arrays of dimension "
"at least {0} for model_set_axis={1} (the value "
"given for {2!r} is only {3}-dimensional)".format(
min_ndim, model_set_axis, name, param_ndim))
max_ndim = max(max_ndim, param_ndim)
if n_models is None:
# Use the dimensions of the first parameter to determine
# the number of model sets
n_models = value.shape[model_set_axis]
elif value.shape[model_set_axis] != n_models:
raise InputParameterError(
"Inconsistent dimensions for parameter {0!r} for "
"{1} model sets. The length of axis {2} must be the "
"same for all input parameter values".format(
name, n_models, model_set_axis))
self._check_param_broadcast(params, max_ndim)
else:
if n_models is None:
n_models = 1
self._check_param_broadcast(params, None)
self._n_models = n_models
self._initialize_parameter_values(params)
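# Sketch of a model-set initialization (Gaussian1D is illustrative):
# Gaussian1D(amplitude=[1, 2], mean=[0, 1], stddev=[.1, .2], n_models=2)
# stores each parameter as an array whose axis 0 (the default
# model_set_axis) indexes the two models in the set.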
def _initialize_parameter_values(self, params):
# self._param_metrics should have been initialized in
# self._initialize_parameters
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
unit = None
param_descr = getattr(self, name)
if params.get(name) is None:
default = param_descr.default
if default is None:
# No value was supplied for the parameter and the
# parameter does not have a default, therefore the model
# is underspecified
raise TypeError(
"{0}.__init__() requires a value for parameter "
"{1!r}".format(self.__class__.__name__, name))
value = params[name] = default
unit = param_descr.unit
else:
value = params[name]
if isinstance(value, Quantity):
unit = value.unit
else:
unit = None
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name]['slice'] = param_slice
param_metrics[name]['shape'] = param_shape
if unit is None and param_descr.unit is not None:
raise InputParameterError(
"{0}.__init__() requires a Quantity for parameter "
"{1!r}".format(self.__class__.__name__, name))
param_metrics[name]['orig_unit'] = unit
param_metrics[name]['raw_unit'] = None
if param_descr._setter is not None:
_val = param_descr._setter(value)
if isinstance(_val, Quantity):
param_metrics[name]['raw_unit'] = _val.unit
else:
param_metrics[name]['raw_unit'] = None
total_size += param_size
self._param_metrics = param_metrics
self._parameters = np.empty(total_size, dtype=np.float64)
# Now set the parameter values (this will also fill
# self._parameters)
# TODO: This is a bit ugly, but easier to deal with than how this was
# done previously. There's still lots of opportunity for refactoring
# though, in particular once we move the _get/set_model_value methods
# out of Parameter and into Model (renaming them
# _get/set_parameter_value)
for name, value in params.items():
# value here may be a Quantity object.
param_descr = getattr(self, name)
unit = param_descr.unit
value = np.array(value)
orig_unit = param_metrics[name]['orig_unit']
if param_descr._setter is not None:
if unit is not None:
value = np.asarray(param_descr._setter(value * orig_unit).value)
else:
value = param_descr._setter(value)
self._parameters[param_metrics[name]['slice']] = value.ravel()
# Finally validate all the parameters; we do this last so that
# validators that depend on one of the other parameters' values will
# work
for name in params:
param_descr = getattr(self, name)
param_descr.validator(param_descr.value)
def _check_param_broadcast(self, params, max_ndim):
"""
This subroutine checks that all parameter arrays can be broadcast
against each other, and determines the shapes parameters must have in
order to broadcast correctly.
If ``max_ndim`` is None this merely checks that the parameters
broadcast against one another (no broadcast shapes are recorded). This
mode is only used for single model sets.
"""
all_shapes = []
param_names = []
model_set_axis = self._model_set_axis
for name in self.param_names:
# Previously this just used iteritems(params), but we loop over all
# param_names instead just to ensure some determinism in the
# ordering behavior
if name not in params:
continue
value = params[name]
param_names.append(name)
# We've already checked that each parameter array is compatible in
# the model_set_axis dimension, but now we need to check the
# dimensions excluding that axis
# Split the array dimensions into the axes before model_set_axis
# and after model_set_axis
param_shape = np.shape(value)
param_ndim = len(param_shape)
if max_ndim is not None and param_ndim < max_ndim:
# All arrays have the same number of dimensions up to the
# model_set_axis dimension, but after that they may have a
# different number of trailing axes. The number of trailing
# axes must be extended for mutual compatibility. For example
# if max_ndim = 3 and model_set_axis = 0, an array with the
# shape (2, 2) must be extended to (2, 1, 2). However, an
# array with shape (2,) is extended to (2, 1, 1).
new_axes = (1,) * (max_ndim - param_ndim)
if model_set_axis < 0:
# Just need to prepend axes to make up the difference
broadcast_shape = new_axes + param_shape
else:
broadcast_shape = (param_shape[:model_set_axis + 1] +
new_axes +
param_shape[model_set_axis + 1:])
self._param_metrics[name]['broadcast_shape'] = broadcast_shape
all_shapes.append(broadcast_shape)
else:
all_shapes.append(param_shape)
# Now check mutual broadcastability of all shapes
try:
check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
param_a = param_names[shape_a_idx]
param_b = param_names[shape_b_idx]
raise InputParameterError(
"Parameter {0!r} of shape {1!r} cannot be broadcast with "
"parameter {2!r} of shape {3!r}. All parameter arrays "
"must have shapes that are mutually compatible according "
"to the broadcasting rules.".format(param_a, shape_a,
param_b, shape_b))
def _param_sets(self, raw=False, units=False):
"""
Implementation of the Model.param_sets property.
This internal implementation has a ``raw`` argument which controls
whether or not to return the raw parameter values (i.e. the values that
are actually stored in the ._parameters array, as opposed to the values
displayed to users). In most cases these are one and the same but there
are currently a few exceptions.
Note: This is notably an overcomplicated device and may be removed
entirely in the near future.
"""
param_metrics = self._param_metrics
values = []
shapes = []
for name in self.param_names:
param = getattr(self, name)
if raw:
value = param._raw_value
else:
value = param.value
broadcast_shape = param_metrics[name].get('broadcast_shape')
if broadcast_shape is not None:
value = value.reshape(broadcast_shape)
shapes.append(np.shape(value))
if len(self) == 1:
# Add a single param set axis to the parameter's value (thus
# converting scalars to shape (1,) array values) for
# consistency
value = np.array([value])
if units:
if raw and self._param_metrics[name]['raw_unit'] is not None:
unit = self._param_metrics[name]['raw_unit']
else:
unit = param.unit
if unit is not None:
value = Quantity(value, unit)
values.append(value)
if len(set(shapes)) != 1 or units:
# If the parameters are not all the same shape, converting to an
# array is going to produce an object array
# However the way Numpy creates object arrays is tricky in that it
# will recurse into array objects in the list and break them up
# into separate objects. Doing things this way ensures a 1-D
# object array the elements of which are the individual parameter
# arrays. There's not much reason to do this over returning a list
# except for consistency
psets = np.empty(len(values), dtype=object)
psets[:] = values
return psets
# TODO: Returning an array from this method may be entirely pointless
# for internal use--perhaps only the external param_sets method should
# return an array (and just for backwards compat--I would prefer to
# maybe deprecate that method)
return np.array(values)
def _format_repr(self, args=[], kwargs={}, defaults={}):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# TODO: I think this could be reworked to preset model sets better
parts = [repr(a) for a in args]
parts.extend(
"{0}={1}".format(name,
param_repr_oneline(getattr(self, name)))
for name in self.param_names)
if self.name is not None:
parts.append('name={0!r}'.format(self.name))
for kwarg, value in kwargs.items():
if kwarg in defaults and defaults[kwarg] != value:
continue
parts.append('{0}={1!r}'.format(kwarg, value))
if len(self) > 1:
parts.append("n_models={0}".format(len(self)))
return '<{0}({1})>'.format(self.__class__.__name__, ', '.join(parts))
def _format_str(self, keywords=[]):
"""
Internal implementation of ``__str__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__str__`` while keeping the same basic
formatting.
"""
default_keywords = [
('Model', self.__class__.__name__),
('Name', self.name),
('Inputs', self.inputs),
('Outputs', self.outputs),
('Model set size', len(self))
]
parts = ['{0}: {1}'.format(keyword, value)
for keyword, value in default_keywords + keywords
if value is not None]
parts.append('Parameters:')
if len(self) == 1:
columns = [[getattr(self, name).value]
for name in self.param_names]
else:
columns = [getattr(self, name).value
for name in self.param_names]
if columns:
param_table = Table(columns, names=self.param_names)
# Set units on the columns
for name in self.param_names:
param_table[name].unit = getattr(self, name).unit
parts.append(indent(str(param_table), width=4))
return '\n'.join(parts)
class FittableModel(Model):
"""
Base class for models that can be fitted using the built-in fitting
algorithms.
"""
linear = False
# derivative with respect to parameters
fit_deriv = None
"""
Function (similar to the model's `~Model.evaluate`) to compute the
derivatives of the model with respect to its parameters, for use by fitting
algorithms. In other words, this computes the Jacobian matrix with respect
to the model's parameters.
"""
# Flag that indicates if the model derivatives with respect to parameters
# are given in columns or rows
col_fit_deriv = True
fittable = True
class Fittable1DModel(FittableModel):
"""
Base class for one-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
inputs = ('x',)
outputs = ('y',)
_separable = True
class Fittable2DModel(FittableModel):
"""
Base class for two-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
inputs = ('x', 'y')
outputs = ('z',)
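# A minimal subclass sketch (hypothetical model; assumes the Parameter
# descriptor from astropy.modeling.parameters, imported elsewhere in this
# module):
#
#     class Line(Fittable1DModel):
#         slope = Parameter(default=1)
#         intercept = Parameter(default=0)
#
#         @staticmethod
#         def evaluate(x, slope, intercept):
#             return slope * x + intercept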
def _make_arithmetic_operator(oper):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
def op(f, g):
return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])
return op
def _composition_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: g[0](f[0](inputs, params), params),
f[1], g[2])
def _join_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: (f[0](inputs[:f[1]], params) +
g[0](inputs[f[1]:], params)),
f[1] + g[1], f[2] + g[2])
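# Sketch of the (evaluate, n_inputs, n_outputs) convention used by the three
# helpers above: for f = (f_eval, 1, 1) and g = (g_eval, 1, 1), composition
# '|' produces a tuple whose evaluator feeds f's outputs into g, while join
# '&' produces one with n_inputs = f[1] + g[1] and n_outputs = f[2] + g[2],
# evaluating f and g on disjoint slices of the combined inputs.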
# TODO: Support a couple unary operators--at least negation?
BINARY_OPERATORS = {
'+': _make_arithmetic_operator(operator.add),
'-': _make_arithmetic_operator(operator.sub),
'*': _make_arithmetic_operator(operator.mul),
'/': _make_arithmetic_operator(operator.truediv),
'**': _make_arithmetic_operator(operator.pow),
'|': _composition_operator,
'&': _join_operator
}
_ORDER_OF_OPERATORS = [('|',), ('&',), ('+', '-'), ('*', '/'), ('**',)]
OPERATOR_PRECEDENCE = {}
for idx, ops in enumerate(_ORDER_OF_OPERATORS):
for op in ops:
OPERATOR_PRECEDENCE[op] = idx
del idx, op, ops
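# The loop above yields, for reference:
# OPERATOR_PRECEDENCE == {'|': 0, '&': 1, '+': 2, '-': 2, '*': 3, '/': 3,
#                         '**': 4}
# where a higher value means the operator binds more tightly.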
class _CompoundModelMeta(_ModelMeta):
_tree = None
_submodels = None
_submodel_names = None
_nextid = 0
_param_names = None
# _param_map is a mapping of the compound model's generated param names to
# the parameters of submodels they are associated with. The values in this
# mapping are (idx, name) tuples where idx is the index of the submodel this
# parameter is associated with, and name is the same parameter's name on
# the submodel
# In principle this will allow compound models to give entirely new names
# to parameters that don't have to be the same as their original names on
# the submodels, but right now that isn't taken advantage of
_param_map = None
_slice_offset = 0
# When taking slices of a compound model, this keeps track of how offset
# the first model in the slice is from the first model in the original
# compound model it was taken from
# This just inverts _param_map, swapping keys with values. This is also
# useful to have.
_param_map_inverse = None
_fittable = None
_evaluate = None
def __getitem__(cls, index):
index = cls._normalize_index(index)
if isinstance(index, (int, np.integer)):
return cls._get_submodels()[index]
else:
return cls._get_slice(index.start, index.stop)
def __getattr__(cls, attr):
# Make sure the _tree attribute is set; otherwise we are not looking up
# an attribute on a concrete compound model class and should just raise
# the AttributeError
if cls._tree is not None and attr in cls.param_names:
cls._init_param_descriptors()
return getattr(cls, attr)
raise AttributeError(attr)
def __repr__(cls):
if cls._tree is None:
# This case is mostly for debugging purposes
return cls._format_cls_repr()
expression = cls._format_expression()
components = cls._format_components()
keywords = [
('Expression', expression),
('Components', '\n' + indent(components))
]
return cls._format_cls_repr(keywords=keywords)
def __dir__(cls):
"""
Returns a list of attributes defined on a compound model, including
all of its parameters.
"""
basedir = super().__dir__()
if cls._tree is not None:
for name in cls.param_names:
basedir.append(name)
basedir.sort()
return basedir
def __reduce__(cls):
rv = super().__reduce__()
if isinstance(rv, tuple):
# Delete _evaluate from the members dict
with suppress(KeyError):
del rv[1][2]['_evaluate']
return rv
@property
def submodel_names(cls):
if cls._submodel_names is None:
seen = {}
names = []
for idx, submodel in enumerate(cls._get_submodels()):
name = str(submodel.name)
if name in seen:
names.append('{0}_{1}'.format(name, idx))
if seen[name] >= 0:
jdx = seen[name]
names[jdx] = '{0}_{1}'.format(names[jdx], jdx)
seen[name] = -1
else:
names.append(name)
seen[name] = idx
cls._submodel_names = tuple(names)
return cls._submodel_names
@property
def param_names(cls):
if cls._param_names is None:
cls._init_param_names()
return cls._param_names
@property
def fittable(cls):
if cls._fittable is None:
cls._fittable = all(m.fittable for m in cls._get_submodels())
return cls._fittable
# TODO: Maybe we could use make_function_with_signature for evaluate, but
# it's probably not worth it (and I'm not sure what the limit is on number
# of function arguments/local variables but we could break that limit for
# complicated compound models...)
def evaluate(cls, *args):
if cls._evaluate is None:
func = cls._tree.evaluate(BINARY_OPERATORS,
getter=cls._model_evaluate_getter)[0]
cls._evaluate = func
inputs = args[:cls.n_inputs]
params = iter(args[cls.n_inputs:])
result = cls._evaluate(inputs, params)
if cls.n_outputs == 1:
return result[0]
else:
return result
# TODO: This supports creating a new compound model from two existing
# compound models (or normal models) and a single operator. However, it
# ought also to be possible to create a new model from an *entire*
# expression, represented as a sequence of operators and their operands (or
# an existing ExpressionTree) and build that into a compound model without
# creating an intermediate _CompoundModel class for every single operator
# in the expression. This will prove to be a useful optimization in many
# cases
@classmethod
def _from_operator(mcls, operator, left, right, additional_members={}):
"""
Given a Python operator (represented by a string, such as ``'+'``
or ``'*'``), and two model classes or instances, return a new compound
model that evaluates the given operator on the outputs of the left and
right input models.
If either of the input models are a model *class* (i.e. a subclass of
`~astropy.modeling.Model`) then the returned model is a new subclass of
`~astropy.modeling.Model` that may be instantiated with any parameter
values. If both input models are *instances* of a model, a new class
is still created, but this method returns an *instance* of that class,
taking the parameter values from the parameters of the input model
instances.
If given, the ``additional_members`` `dict` may provide additional
class members that should be added to the generated
`~astropy.modeling.Model` subclass. Some members that are generated by
this method should not be provided by ``additional_members``. These
include ``_tree``, ``inputs``, ``outputs``, ``linear``,
``standard_broadcasting``, and ``__module__``. This is currently for
internal use only.
"""
# Note, currently this only supports binary operators, but could be
# easily extended to support unary operators (namely '-') if/when
# needed
children = []
for child in (left, right):
if isinstance(child, (_CompoundModelMeta, _CompoundModel)):
"""
Although the original child models were copied we make another
copy here to ensure that changes in this child compound model
parameters will not propagate to the reuslt, that is
cm1 = Gaussian1D(1, 5, .1) + Gaussian1D()
cm2 = cm1 | Scale()
cm1.amplitude_0 = 100
assert(cm2.amplitude_0 == 1)
"""
children.append(copy.deepcopy(child._tree))
elif isinstance(child, Model):
children.append(ExpressionTree(child.copy(),
inputs=child.inputs,
outputs=child.outputs))
else:
children.append(ExpressionTree(child, inputs=child.inputs, outputs=child.outputs))
inputs, outputs = mcls._check_inputs_and_outputs(operator, left, right)
tree = ExpressionTree(operator, left=children[0], right=children[1],
inputs=inputs, outputs=outputs)
name = str('CompoundModel{0}'.format(_CompoundModelMeta._nextid))
_CompoundModelMeta._nextid += 1
mod = find_current_module(3)
if mod:
modname = mod.__name__
else:
modname = '__main__'
if operator in ('|', '+', '-'):
linear = left.linear and right.linear
else:
# Which is not to say it is *definitely* not linear but it would be
# trickier to determine
linear = False
standard_broadcasting = left.standard_broadcasting and right.standard_broadcasting
# Note: If any other members are added here, make sure to mention them
# in the docstring of this method.
members = additional_members
members.update({
'_tree': tree,
'_is_dynamic': True, # See docs for _ModelMeta._is_dynamic
'inputs': inputs,
'outputs': outputs,
'linear': linear,
'standard_broadcasting': standard_broadcasting,
'__module__': str(modname)})
new_cls = mcls(name, (_CompoundModel,), members)
if isinstance(left, Model) and isinstance(right, Model):
# Both models used in the operator were already instantiated models,
# not model *classes*. As such it's not particularly useful to return
# the class itself, but to instead produce a new instance:
instance = new_cls()
# Workaround for https://github.com/astropy/astropy/issues/3542
# TODO: Any effort to restructure the tree-like data structure for
# compound models should try to obviate this workaround--if
# intermediate compound models are stored in the tree as well then
# we can immediately check for custom inverses on sub-models when
# computing the inverse
instance._user_inverse = mcls._make_user_inverse(
operator, left, right)
if left._n_models == right._n_models:
instance._n_models = left._n_models
else:
raise ValueError('Model sets must have the same number of '
'components.')
return instance
# Otherwise return the new uninstantiated class itself
return new_cls
@classmethod
def _check_inputs_and_outputs(mcls, operator, left, right):
# TODO: These aren't the full rules for handling inputs and outputs, but
# this will handle most basic cases correctly
if operator == '|':
inputs = left.inputs
outputs = right.outputs
if left.n_outputs != right.n_inputs:
raise ModelDefinitionError(
"Unsupported operands for |: {0} (n_inputs={1}, "
"n_outputs={2}) and {3} (n_inputs={4}, n_outputs={5}); "
"n_outputs for the left-hand model must match n_inputs "
"for the right-hand model.".format(
left.name, left.n_inputs, left.n_outputs, right.name,
right.n_inputs, right.n_outputs))
elif operator == '&':
inputs = combine_labels(left.inputs, right.inputs)
outputs = combine_labels(left.outputs, right.outputs)
else:
# Without loss of generality
inputs = left.inputs
outputs = left.outputs
if (left.n_inputs != right.n_inputs or
left.n_outputs != right.n_outputs):
raise ModelDefinitionError(
"Unsupported operands for {0}: {1} (n_inputs={2}, "
"n_outputs={3}) and {4} (n_inputs={5}, n_outputs={6}); "
"models must have the same n_inputs and the same "
"n_outputs for this operator".format(
operator, left.name, left.n_inputs, left.n_outputs,
right.name, right.n_inputs, right.n_outputs))
return inputs, outputs
@classmethod
def _make_user_inverse(mcls, operator, left, right):
"""
Generates an inverse `Model` for this `_CompoundModel` when either
model in the operation has a *custom inverse* that was manually
assigned by the user.
If either model has a custom inverse, and in particular if another
`_CompoundModel` has a custom inverse, then none of that model's
sub-models should be considered at all when computing the inverse.
So in that case we just compute the inverse ahead of time and set
it as the new compound model's custom inverse.
Note, this use case only applies when combining model instances,
since model classes don't currently have a notion of a "custom
inverse" (though it could probably be supported by overriding the
class's inverse property).
TODO: Consider fixing things so the aforementioned class-based case
works as well. However, for the present purposes this is good enough.
"""
if not (operator in ('&', '|') and
(left._user_inverse or right._user_inverse)):
# These are the only operators that support an inverse right now
return None
try:
left_inv = left.inverse
right_inv = right.inverse
except NotImplementedError:
# If either inverse is undefined then just return None; this
# means the normal _CompoundModel.inverse routine will fail
# naturally anyways, since it requires all sub-models to have
# an inverse defined
return None
if operator == '&':
return left_inv & right_inv
else:
return right_inv | left_inv
# TODO: Perhaps, just perhaps, the post-order (or ???-order) ordering of
# leaf nodes is something the ExpressionTree class itself could just know
def _get_submodels(cls):
# Would make this a lazyproperty but those don't currently work with
# type objects
if cls._submodels is not None:
return cls._submodels
submodels = [c.value for c in cls._tree.traverse_postorder()
if c.isleaf]
cls._submodels = submodels
return submodels
def _init_param_descriptors(cls):
"""
This routine sets up the names for all the parameters on a compound
model, including figuring out unique names for those parameters and
also mapping them back to their associated parameters of the underlying
submodels.
Setting this all up is costly, and only necessary for compound models
that a user will directly interact with. For example when building an
expression like::
>>> M = (Model1 + Model2) * Model3 # doctest: +SKIP
the user will generally never interact directly with the temporary
result of the subexpression ``(Model1 + Model2)``. So there's no need
to setup all the parameters for that temporary throwaway. Only once
the full expression is built and the user initializes or introspects
``M`` is it necessary to determine its full parameterization.
"""
# Accessing cls.param_names will implicitly call _init_param_names if
# needed and thus also set up the _param_map; I'm not crazy about that
# design but it stands for now
for param_name in cls.param_names:
submodel_idx, submodel_param = cls._param_map[param_name]
submodel = cls[submodel_idx]
orig_param = getattr(submodel, submodel_param, None)
if isinstance(submodel, Model):
# Take the parameter's default from the model's value for that
# parameter
default = orig_param.value
else:
default = orig_param.default
# Copy constraints
constraints = dict((key, getattr(orig_param, key))
for key in Model.parameter_constraints)
# Note: Parameter.copy() returns a new unbound Parameter, never
# a bound Parameter even if submodel is a Model instance (as
# opposed to a Model subclass)
new_param = orig_param.copy(name=param_name, default=default,
unit=orig_param.unit,
**constraints)
setattr(cls, param_name, new_param)
def _init_param_names(cls):
"""
This subroutine is solely for setting up the ``param_names`` attribute
itself.
See ``_init_param_descriptors`` for the full parameter setup.
"""
# Currently this skips over Model *instances* in the expression tree;
# basically these are treated as constants and do not add
# fittable/tunable parameters to the compound model.
# TODO: I'm not 100% happy with this design, and maybe we need some
# interface for distinguishing fittable/settable parameters with
# *constant* parameters (which would be distinct from parameters with
# fixed constraints since they're permanently locked in place). But I'm
# not sure if this is really the best way to treat the issue.
names = []
param_map = {}
# Start counting the suffix indices to put on parameter names from the
# slice_offset. Usually this will just be zero, but for compound
# models that were sliced from another compound model this may be > 0
param_suffix = cls._slice_offset
for idx, model in enumerate(cls._get_submodels()):
if not model.param_names:
# Skip models that don't have parameters in the numbering
# TODO: Reevaluate this if it turns out to be confusing, though
# parameter-less models are not very common in practice (there
# are a few projections that don't take parameters)
continue
for param_name in model.param_names:
# This is sort of heuristic, but we want to check that
# model.param_name *actually* returns a Parameter descriptor,
# and that the model isn't some inconsistent type that happens
# to have a param_names attribute but does not actually
# implement settable parameters.
# In the future we can probably remove this check, but this is
# here specifically to support the legacy compat
# _CompositeModel which can be considered a pathological case
# in the context of the new framework
# if not isinstance(getattr(model, param_name, None),
# Parameter):
# break
name = '{0}_{1}'.format(param_name, param_suffix + idx)
names.append(name)
param_map[name] = (idx, param_name)
cls._param_names = tuple(names)
cls._param_map = param_map
cls._param_map_inverse = dict((v, k) for k, v in param_map.items())
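# For example, for a compound model built as Gaussian1D() + Gaussian1D(),
# this produces param_names of ('amplitude_0', 'mean_0', 'stddev_0',
# 'amplitude_1', 'mean_1', 'stddev_1'), with one suffix per submodel index.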
def _format_expression(cls):
# TODO: At some point might be useful to make a public version of this,
# albeit with more formatting options
return cls._tree.format_expression(OPERATOR_PRECEDENCE)
def _format_components(cls):
return '\n\n'.join('[{0}]: {1!r}'.format(idx, m)
for idx, m in enumerate(cls._get_submodels()))
def _normalize_index(cls, index):
"""
Converts an index given to __getitem__ to either an integer, or
a slice with integer start and stop values.
If the length of the slice is exactly 1 this converts the index to a
simple integer lookup.
Negative integers are converted to positive integers.
"""
def get_index_from_name(name):
try:
return cls.submodel_names.index(name)
except ValueError:
raise IndexError(
'Compound model {0} does not have a component named '
'{1}'.format(cls.name, name))
def check_for_negative_index(index):
if index < 0:
new_index = len(cls.submodel_names) + index
if new_index < 0:
# If still < 0 then this is an invalid index
raise IndexError(
"Model index {0} out of range.".format(index))
else:
index = new_index
return index
if isinstance(index, str):
return get_index_from_name(index)
elif isinstance(index, slice):
if index.step not in (1, None):
# In principle it could be but I can scarcely imagine a case
# where it would be useful. If someone can think of one then
# we can enable it.
raise ValueError(
"Step not supported for compound model slicing.")
start = index.start if index.start is not None else 0
stop = (index.stop
if index.stop is not None else len(cls.submodel_names))
if isinstance(start, (int, np.integer)):
start = check_for_negative_index(start)
if isinstance(stop, (int, np.integer)):
stop = check_for_negative_index(stop)
if isinstance(start, str):
start = get_index_from_name(start)
if isinstance(stop, str):
stop = get_index_from_name(stop) + 1
length = stop - start
if length == 1:
return start
elif length <= 0:
raise ValueError("Empty slice of a compound model.")
return slice(start, stop)
elif isinstance(index, (int, np.integer)):
if index >= len(cls.submodel_names):
raise IndexError(
"Model index {0} out of range.".format(index))
return check_for_negative_index(index)
raise TypeError(
'Submodels can be indexed either by their integer order or '
'their name (got {0!r}).'.format(index))
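# For example (a sketch, assuming a three-component compound model with a
# submodel named 'gauss'): _normalize_index(-1) returns 2,
# _normalize_index('gauss') resolves the name via submodel_names, and
# _normalize_index(slice(0, 1)) collapses the length-1 slice to the plain
# integer 0.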
def _get_slice(cls, start, stop):
"""
Return a new model built from a sub-expression of the expression
represented by this model.
Right now this is highly inefficient, as it creates a new temporary
model for each operator that appears in the sub-expression. It would
be better if this just built a new expression tree, and the new model
instantiated directly from that tree.
Once tree -> model instantiation is possible this should be fixed to
use that instead.
"""
members = {'_slice_offset': cls._slice_offset + start}
operators = dict((oper, _model_oper(oper, additional_members=members))
for oper in BINARY_OPERATORS)
return cls._tree.evaluate(operators, start=start, stop=stop)
@staticmethod
def _model_evaluate_getter(idx, model):
n_params = len(model.param_names)
n_inputs = model.n_inputs
n_outputs = model.n_outputs
# If model is not an instance, we need to instantiate it to make sure
# that we can call _validate_input_units (since e.g. input_units can
# be an instance property).
def evaluate_wrapper(model, inputs, param_values):
inputs = model._validate_input_units(inputs)
outputs = model.evaluate(*inputs, *param_values)
if n_outputs == 1:
outputs = (outputs,)
return model._process_output_units(inputs, outputs)
if isinstance(model, Model):
def f(inputs, params):
param_values = tuple(islice(params, n_params))
return evaluate_wrapper(model, inputs, param_values)
else:
# Where previously model was a class, now make an instance
def f(inputs, params):
param_values = tuple(islice(params, n_params))
m = model(*param_values)
return evaluate_wrapper(m, inputs, param_values)
return (f, n_inputs, n_outputs)
class _CompoundModel(Model, metaclass=_CompoundModelMeta):
fit_deriv = None
col_fit_deriv = False
_submodels = None
def __str__(self):
expression = self._format_expression()
components = self._format_components()
keywords = [
('Expression', expression),
('Components', '\n' + indent(components))
]
return super()._format_str(keywords=keywords)
def _generate_input_output_units_dict(self, mapping, attr):
"""
This method is used to transform dict or bool settings from
submodels into a single dictionary for the composite model,
taking into account renaming of input parameters.
"""
d = {}
for inp, (model, orig_inp) in mapping.items():
mattr = getattr(model, attr)
if isinstance(mattr, dict):
if orig_inp in mattr:
d[inp] = mattr[orig_inp]
elif isinstance(mattr, bool):
d[inp] = mattr
if d: # Note that if d is empty, we just return None
return d
@property
def _supports_unit_fitting(self):
return False
@property
def input_units_allow_dimensionless(self):
return self._generate_input_output_units_dict(self._tree.inputs_map,
'input_units_allow_dimensionless')
@property
def input_units_strict(self):
return self._generate_input_output_units_dict(self._tree.inputs_map,
'input_units_strict')
@property
def input_units(self):
return self._generate_input_output_units_dict(self._tree.inputs_map, 'input_units')
@property
def input_units_equivalencies(self):
return self._generate_input_output_units_dict(self._tree.inputs_map,
'input_units_equivalencies')
@property
def return_units(self):
return self._generate_input_output_units_dict(self._tree.outputs_map,
'return_units')
def __getattr__(self, attr):
# This __getattr__ is necessary, because _CompoundModelMeta creates
# Parameter descriptors *lazily*--they do not exist in the class
# __dict__ until one of them has been accessed.
# However, this is at odds with how Python looks up descriptors (see
# (https://docs.python.org/3/reference/datamodel.html#invoking-descriptors)
# which is to look directly in the class __dict__
# This workaround allows descriptors to work correctly when they are
# not initially found in the class __dict__
value = getattr(self.__class__, attr)
if hasattr(value, '__get__'):
# Object is a descriptor, so we should really return the result of
# its __get__
value = value.__get__(self, self.__class__)
return value
def __getitem__(self, index):
index = self.__class__._normalize_index(index)
model = self.__class__[index]
if isinstance(index, slice):
param_names = model.param_names
else:
param_map = self.__class__._param_map_inverse
param_names = tuple(param_map[index, name]
for name in model.param_names)
return model._from_existing(self, param_names)
@property
def submodel_names(self):
return self.__class__.submodel_names
@sharedmethod
def n_submodels(self):
return len(self.submodel_names)
@property
def param_names(self):
return self.__class__.param_names
@property
def fittable(self):
return self.__class__.fittable
@sharedmethod
def evaluate(self, *args):
return self.__class__.evaluate(*args)
# TODO: The way this works is highly inefficient--the inverse is created by
# making a new model for each operator in the compound model, which could
# potentially mean creating a large number of temporary throwaway model
# classes. This can definitely be optimized in the future by implementing
# a way to construct a single model class from an existing tree
@property
def inverse(self):
def _not_implemented(oper):
def _raise(x, y):
raise NotImplementedError(
"The inverse is not currently defined for compound "
"models created using the {0} operator.".format(oper))
return _raise
operators = dict((oper, _not_implemented(oper))
for oper in ('+', '-', '*', '/', '**'))
operators['&'] = operator.and_
# Reverse the order of compositions
operators['|'] = lambda x, y: operator.or_(y, x)
def getter(idx, model):
try:
# By indexing on self[] this will return an instance of the
# model, with all the appropriate parameters set, which is
# currently required to return an inverse
return self[idx].inverse
except NotImplementedError:
raise NotImplementedError(
"All models in a composite model must have an inverse "
"defined in order for the composite model to have an "
"inverse. {0!r} does not have an inverse.".format(model))
return self._tree.evaluate(operators, getter=getter)
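# For example (a sketch): for instances f and g that both define inverses,
# (f | g).inverse evaluates to g.inverse | f.inverse (composition order
# reversed), while (f & g).inverse evaluates to f.inverse & g.inverse.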
@sharedmethod
def _get_submodels(self):
return self.__class__._get_submodels()
def _parameter_units_for_data_units(self, input_units, output_units):
units_for_data = {}
for imodel, model in enumerate(self._submodels):
units_for_data_sub = model._parameter_units_for_data_units(input_units, output_units)
for param_sub in units_for_data_sub:
param = self._param_map_inverse[(imodel, param_sub)]
units_for_data[param] = units_for_data_sub[param_sub]
return units_for_data
def deepcopy(self):
"""
Return a deep copy of a compound model.
"""
new_model = self.copy()
new_model._submodels = [model.deepcopy() for model in self._submodels]
return new_model
def custom_model(*args, fit_deriv=None, **kwargs):
"""
Create a model from a user defined function. The inputs and parameters of
the model will be inferred from the arguments of the function.
This can be used either as a function or as a decorator. See below for
examples of both usages.
.. note::
All model parameters have to be defined as keyword arguments with
default values in the model function. Use `None` as a default argument
value if you do not want to have a default value for that parameter.
Parameters
----------
func : function
Function which defines the model. It should take N positional
arguments where ``N`` is the number of dimensions of the model (the
number of independent variables in the model), and any number of keyword arguments
(the parameters). It must return the value of the model (typically as
an array, but can also be a scalar for scalar inputs). This
corresponds to the `~astropy.modeling.Model.evaluate` method.
fit_deriv : function, optional
Function which defines the Jacobian derivative of the model. I.e., the
derivative with respect to the *parameters* of the model. It should
have the same argument signature as ``func``, but should return a
sequence where each element of the sequence is the derivative
with respect to the corresponding argument. This corresponds to the
:meth:`~astropy.modeling.FittableModel.fit_deriv` method.
Examples
--------
Define a sinusoidal model function as a custom 1D model::
>>> from astropy.modeling.models import custom_model
>>> import numpy as np
>>> def sine_model(x, amplitude=1., frequency=1.):
... return amplitude * np.sin(2 * np.pi * frequency * x)
>>> def sine_deriv(x, amplitude=1., frequency=1.):
... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)
>>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)
Create an instance of the custom model and evaluate it::
>>> model = SineModel()
>>> model(0.25)
1.0
This model instance can now be used like a usual astropy model.
The next example demonstrates a 2D Moffat function model, and also
demonstrates the support for docstrings (this example could also include
a derivative, but it has been omitted for simplicity)::
>>> @custom_model
... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,
... alpha=1.0):
... \"\"\"Two dimensional Moffat function.\"\"\"
... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
... return amplitude * (1 + rr_gg) ** (-alpha)
...
>>> print(Moffat2D.__doc__)
Two dimensional Moffat function.
>>> model = Moffat2D()
>>> model(1, 1) # doctest: +FLOAT_CMP
0.3333333333333333
"""
if kwargs:
warnings.warn(
"Function received unexpected arguments ({}) these "
"are ignored but will raise an Exception in the "
"future.".format(list(kwargs)),
AstropyDeprecationWarning)
if len(args) == 1 and callable(args[0]):
return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)
elif not args:
return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)
else:
raise TypeError(
"{0} takes at most one positional argument (the callable/"
"function to be turned into a model. When used as a decorator "
"it should be passed keyword arguments only (if "
"any).".format(__name__))
def _custom_model_wrapper(func, fit_deriv=None):
"""
Internal implementation of `custom_model`.
When `custom_model` is called as a function its arguments are passed to
this function, and the result of this function is returned.
When `custom_model` is used as a decorator a partial evaluation of this
function is returned by `custom_model`.
"""
if not callable(func):
raise ModelDefinitionError(
"func is not callable; it must be a function or other callable "
"object")
if fit_deriv is not None and not callable(fit_deriv):
raise ModelDefinitionError(
"fit_deriv not callable; it must be a function or other "
"callable object")
model_name = func.__name__
inputs, params = get_inputs_and_params(func)
if (fit_deriv is not None and
len(fit_deriv.__defaults__ or ()) != len(params)):
raise ModelDefinitionError("derivative function should accept "
"same number of parameters as func.")
# TODO: Maybe have a clever scheme for default output name?
if inputs:
output_names = (inputs[0].name,)
else:
output_names = ('x',)
params = dict((param.name, Parameter(param.name, default=param.default))
for param in params)
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = '__main__'
members = {
'__module__': str(modname),
'__doc__': func.__doc__,
'inputs': tuple(x.name for x in inputs),
'outputs': output_names,
'evaluate': staticmethod(func),
}
if fit_deriv is not None:
members['fit_deriv'] = staticmethod(fit_deriv)
members.update(params)
return type(model_name, (FittableModel,), members)
def render_model(model, arr=None, coords=None):
"""
Evaluates a model on an input array. Evaluation is limited to
a bounding box if the `Model.bounding_box` attribute is set.
Parameters
----------
model : `Model`
Model to be evaluated.
arr : `numpy.ndarray`, optional
Array on which the model is evaluated.
coords : array-like, optional
Coordinate arrays mapping to ``arr``, such that
``arr[coords] == arr``.
Returns
-------
array : `numpy.ndarray`
The model evaluated on the input ``arr`` or a new array from ``coords``.
If ``arr`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.
Examples
--------
:ref:`bounding-boxes`
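A minimal usage sketch (``Gaussian2D`` is illustrative; lines are skipped
by doctest)::
>>> import numpy as np  # doctest: +SKIP
>>> from astropy.modeling.models import Gaussian2D  # doctest: +SKIP
>>> g = Gaussian2D(x_mean=5, y_mean=5)  # doctest: +SKIP
>>> g.bounding_box = ((3, 7), (3, 7))  # doctest: +SKIP
>>> img = render_model(g, arr=np.zeros((10, 10)))  # doctest: +SKIP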
"""
bbox = model.bounding_box
if coords is None and arr is None and bbox is None:
raise ValueError('If no bounding_box is set, coords or arr must be input.')
# for consistent indexing
if model.n_inputs == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if arr is not None:
arr = arr.copy()
# Check dimensions match model
if arr.ndim != model.n_inputs:
raise ValueError('number of array dimensions inconsistent with '
'number of model inputs.')
if coords is not None:
# Check dimensions match arr and model
coords = np.array(coords)
if len(coords) != model.n_inputs:
raise ValueError('coordinate length inconsistent with the number '
'of model inputs.')
if arr is not None:
if coords[0].shape != arr.shape:
raise ValueError('coordinate shape inconsistent with the '
'array shape.')
else:
arr = np.zeros(coords[0].shape)
if bbox is not None:
# assures position is at center pixel, important when using add_array
pd = pos, delta = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
for bb in bbox]).astype(int).T
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if arr is None:
arr = model(*sub_coords)
else:
try:
arr = add_array(arr, model(*sub_coords), pos)
except ValueError:
raise ValueError('The `bounding_box` is larger than the input'
' arr in one or more dimensions. Set '
'`model.bounding_box = None`.')
else:
if coords is None:
im_shape = arr.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
arr += model(*coords[::-1])
return arr
def _prepare_inputs_single_model(model, params, inputs, **kwargs):
broadcasts = []
for idx, _input in enumerate(inputs):
input_shape = _input.shape
# Ensure that array scalars are always upgraded to 1-D arrays for the
# sake of consistency with how parameters work. They will be cast back
# to scalars at the end
if not input_shape:
inputs[idx] = _input.reshape((1,))
if not params:
max_broadcast = input_shape
else:
max_broadcast = ()
for param in params:
try:
if model.standard_broadcasting:
broadcast = check_broadcast(input_shape, param.shape)
else:
broadcast = input_shape
except IncompatibleShapeError:
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot be "
"broadcast with parameter {2!r} of shape "
"{3!r}.".format(model.inputs[idx], input_shape,
param.name, param.shape))
if len(broadcast) > len(max_broadcast):
max_broadcast = broadcast
elif len(broadcast) == len(max_broadcast):
max_broadcast = max(max_broadcast, broadcast)
broadcasts.append(max_broadcast)
if model.n_outputs > model.n_inputs:
if len(set(broadcasts)) > 1:
raise ValueError(
"For models with n_outputs > n_inputs, the combination of "
"all inputs and parameters must broadcast to the same shape, "
"which will be used as the shape of all outputs. In this "
"case some of the inputs had different shapes, so it is "
"ambiguous how to format outputs for this model. Try using "
"inputs that are all the same size and shape.")
else:
# Extend the broadcasts list to include shapes for all outputs
extra_outputs = model.n_outputs - model.n_inputs
if not broadcasts:
# If there were no inputs then the broadcasts list is empty
# just add a None since there is no broadcasting of outputs and
# inputs necessary (see _prepare_outputs_single_model)
broadcasts.append(None)
broadcasts.extend([broadcasts[0]] * extra_outputs)
return inputs, (broadcasts,)
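# Illustrative sketch (hypothetical shapes): with standard broadcasting, an
# input of shape (4, 1) and a parameter of shape (3,) give
# check_broadcast((4, 1), (3,)) == (4, 3), so the corresponding output is
# later reshaped to (4, 3) by _prepare_outputs_single_model.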
def _prepare_outputs_single_model(model, outputs, format_info):
broadcasts = format_info[0]
outputs = list(outputs)
for idx, output in enumerate(outputs):
broadcast_shape = broadcasts[idx]
if broadcast_shape is not None:
if not broadcast_shape:
# Shape is (), i.e. a scalar should be returned
outputs[idx] = np.asscalar(output)
else:
outputs[idx] = output.reshape(broadcast_shape)
return tuple(outputs)
def _prepare_inputs_model_set(model, params, inputs, n_models, model_set_axis,
**kwargs):
reshaped = []
pivots = []
for idx, _input in enumerate(inputs):
max_param_shape = ()
if n_models > 1 and model_set_axis is not False:
# Use the shape of the input *excluding* the model axis
input_shape = (_input.shape[:model_set_axis] +
_input.shape[model_set_axis + 1:])
else:
input_shape = _input.shape
for param in params:
try:
check_broadcast(input_shape, param.shape)
except IncompatibleShapeError:
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot be "
"broadcast with parameter {2!r} of shape "
"{3!r}.".format(model.inputs[idx], input_shape,
param.name, param.shape))
if len(param.shape) > len(max_param_shape):
max_param_shape = param.shape
# We've now determined that, excluding the model_set_axis, the
# input can broadcast with all the parameters
input_ndim = len(input_shape)
if model_set_axis is False:
if len(max_param_shape) > input_ndim:
# Just needs to prepend new axes to the input
n_new_axes = 1 + len(max_param_shape) - input_ndim
new_axes = (1,) * n_new_axes
new_shape = new_axes + _input.shape
pivot = model.model_set_axis
else:
pivot = input_ndim - len(max_param_shape)
new_shape = (_input.shape[:pivot] + (1,) +
_input.shape[pivot:])
new_input = _input.reshape(new_shape)
else:
if len(max_param_shape) >= input_ndim:
n_new_axes = len(max_param_shape) - input_ndim
pivot = model.model_set_axis
new_axes = (1,) * n_new_axes
new_shape = (_input.shape[:pivot + 1] + new_axes +
_input.shape[pivot + 1:])
new_input = _input.reshape(new_shape)
else:
pivot = _input.ndim - len(max_param_shape) - 1
new_input = np.rollaxis(_input, model_set_axis,
pivot + 1)
pivots.append(pivot)
reshaped.append(new_input)
if model.n_inputs < model.n_outputs:
pivots.extend([model_set_axis] * (model.n_outputs - model.n_inputs))
return reshaped, (pivots,)
def _prepare_outputs_model_set(model, outputs, format_info, model_set_axis):
pivots = format_info[0]
# If model_set_axis = False was passed then use
# model._model_set_axis to format the output.
if model_set_axis is None or model_set_axis is False:
model_set_axis = model.model_set_axis
outputs = list(outputs)
for idx, output in enumerate(outputs):
pivot = pivots[idx]
if pivot < output.ndim and pivot != model_set_axis:
outputs[idx] = np.rollaxis(output, pivot,
model_set_axis)
return tuple(outputs)
def _validate_input_shapes(inputs, argnames, n_models, model_set_axis,
validate_broadcasting):
"""
Perform basic validation of model inputs--that they are mutually
broadcastable and that they have the minimum dimensions for the given
model_set_axis.
If validation succeeds, returns the total shape that will result from
broadcasting the input arrays with each other.
"""
check_model_set_axis = n_models > 1 and model_set_axis is not False
if not (validate_broadcasting or check_model_set_axis):
# Nothing else needed here
return
all_shapes = []
for idx, _input in enumerate(inputs):
input_shape = np.shape(_input)
# Ensure that the input's model_set_axis matches the model's
# n_models
if input_shape and check_model_set_axis:
# Note: Scalar inputs *only* get a pass on this
if len(input_shape) < model_set_axis + 1:
raise ValueError(
"For model_set_axis={0}, all inputs must be at "
"least {1}-dimensional.".format(
model_set_axis, model_set_axis + 1))
elif input_shape[model_set_axis] != n_models:
try:
argname = argnames[idx]
except IndexError:
# the case of model.inputs = ()
argname = str(idx)
raise ValueError(
"Input argument {0!r} does not have the correct "
"dimensions in model_set_axis={1} for a model set with "
"n_models={2}.".format(argname, model_set_axis,
n_models))
all_shapes.append(input_shape)
if not validate_broadcasting:
return
try:
input_broadcast = check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
arg_a = argnames[shape_a_idx]
arg_b = argnames[shape_b_idx]
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot "
"be broadcast with input {2!r} of shape {3!r}".format(
arg_a, shape_a, arg_b, shape_b))
return input_broadcast
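# Illustrative sketch of the contract above (shapes are hypothetical): with
# n_models=2 and model_set_axis=0, a (2, 10) input carries the model axis and
# validates, returning the broadcast shape, while a (10,) input is rejected:
#
#     >>> import numpy as np
#     >>> _validate_input_shapes([np.zeros((2, 10))], ('x',), 2, 0, True)
#     (2, 10)
#     >>> _validate_input_shapes([np.zeros(10)], ('x',), 2, 0, True)
#     Traceback (most recent call last):
#     ...
#     ValueError: Input argument 'x' does not have the correct dimensions ...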
copyreg.pickle(_ModelMeta, _ModelMeta.__reduce__)
copyreg.pickle(_CompoundModelMeta, _CompoundModelMeta.__reduce__)
|
eaa46ff73c103c875cbd7d2db15ec53121f1d8a1b70211e54f02698cefa59a01 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Creates a common namespace for all pre-defined models.
"""
from .core import custom_model # pylint: disable=W0611
from .mappings import *
from .projections import *
from .rotations import *
from .polynomial import *
from .functional_models import *
from .powerlaws import *
from .tabular import *
from .blackbody import BlackBody1D
"""
Attach a docstring explaining constraints to all models which support them.
Note: add new models to this list
"""
CONSTRAINTS_DOC = """
Other Parameters
----------------
    fixed : dict, optional
A dictionary ``{parameter_name: boolean}`` of parameters to not be
varied during fitting. True means the parameter is held fixed.
Alternatively the `~astropy.modeling.Parameter.fixed`
property of a parameter may be used.
tied : dict, optional
A dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship. Alternatively the
`~astropy.modeling.Parameter.tied` property of a parameter
may be used.
bounds : dict, optional
A dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list or a tuple
of length 2 giving the desired range for the parameter.
Alternatively, the
`~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` properties of a parameter
may be used.
eqcons : list, optional
A list of functions of length ``n`` such that ``eqcons[j](x0,*args) ==
0.0`` in a successfully optimized problem.
    ineqcons : list, optional
        A list of functions of length ``n`` such that ``ineqcons[j](x0,*args) >=
        0.0`` in a successfully optimized problem.
"""
MODELS_WITH_CONSTRAINTS = [
AiryDisk2D, Moffat1D, Moffat2D, Box1D, Box2D,
Const1D, Const2D, Ellipse2D, Disk2D,
Gaussian1D, Gaussian2D,
Linear1D, Lorentz1D, MexicanHat1D, MexicanHat2D,
PowerLaw1D, Sersic1D, Sersic2D, Sine1D, Trapezoid1D, TrapezoidDisk2D,
Chebyshev1D, Chebyshev2D, Hermite1D, Hermite2D, Legendre2D, Legendre1D,
Polynomial1D, Polynomial2D, Voigt1D
]
for item in MODELS_WITH_CONSTRAINTS:
if isinstance(item.__doc__, str):
item.__doc__ += CONSTRAINTS_DOC
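# After this loop, each listed model's docstring (e.g. ``Gaussian1D.__doc__``)
# ends with the "Other Parameters" section defined above.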
|
121edcbb4e333eed41db9f0e79fd561ee867a914dd23bcc13e7ca9bcf6f515a0 | """
Special models useful for complex compound models where control is needed over
which outputs from a source model are mapped to which inputs of a target model.
"""
from .core import FittableModel
__all__ = ['Mapping', 'Identity']
class Mapping(FittableModel):
"""
Allows inputs to be reordered, duplicated or dropped.
Parameters
----------
mapping : tuple
A tuple of integers representing indices of the inputs to this model
to return and in what order to return them. See
:ref:`compound-model-mappings` for more details.
n_inputs : int
Number of inputs; if `None` (default) then ``max(mapping) + 1`` is
used (i.e. the highest input index used in the mapping).
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like
Free-form metadata to associate with this model.
Raises
------
TypeError
        Raised when the number of inputs is less than ``max(mapping)``.
Examples
--------
>>> from astropy.modeling.models import Polynomial2D, Shift, Mapping
>>> poly1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
>>> poly2 = Polynomial2D(1, c0_0=1, c1_0=2.4, c0_1=2.1)
>>> model = (Shift(1) & Shift(2)) | Mapping((0, 1, 0, 1)) | (poly1 & poly2)
>>> model(1, 2) # doctest: +FLOAT_CMP
(17.0, 14.2)
"""
linear = True # FittableModel is non-linear by default
def __init__(self, mapping, n_inputs=None, name=None, meta=None):
if n_inputs is None:
self._inputs = tuple('x' + str(idx)
for idx in range(max(mapping) + 1))
else:
self._inputs = tuple('x' + str(idx)
for idx in range(n_inputs))
self._outputs = tuple('x' + str(idx) for idx in range(len(mapping)))
self._mapping = mapping
self._input_units_strict = {key: False for key in self._inputs}
self._input_units_allow_dimensionless = {key: False for key in self._inputs}
super().__init__(name=name, meta=meta)
@property
def inputs(self):
"""
The name(s) of the input variable(s) on which a model is evaluated.
"""
return self._inputs
@property
def outputs(self):
"""The name(s) of the output(s) of the model."""
return self._outputs
@property
def mapping(self):
"""Integers representing indices of the inputs."""
return self._mapping
def __repr__(self):
if self.name is None:
return '<Mapping({0})>'.format(self.mapping)
else:
return '<Mapping({0}, name={1})>'.format(self.mapping, self.name)
def evaluate(self, *args):
if len(args) != self.n_inputs:
name = self.name if self.name is not None else "Mapping"
raise TypeError('{0} expects {1} inputs; got {2}'.format(
name, self.n_inputs, len(args)))
result = tuple(args[idx] for idx in self._mapping)
if self.n_outputs == 1:
return result[0]
return result
@property
def inverse(self):
"""
A `Mapping` representing the inverse of the current mapping.
Raises
------
`NotImplementedError`
            An inverse does not exist for mappings that drop some of their inputs
(there is then no way to reconstruct the inputs that were dropped).
"""
try:
mapping = tuple(self.mapping.index(idx)
for idx in range(self.n_inputs))
except ValueError:
raise NotImplementedError(
"Mappings such as {0} that drop one or more of their inputs "
"are not invertible at this time.".format(self.mapping))
inv = self.__class__(mapping)
inv._inputs = self._outputs
inv._outputs = self._inputs
return inv
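    # For example, a pure permutation is invertible (its inverse is the
    # permutation that undoes it), while a mapping that drops an input
    # raises NotImplementedError as described above:
    #
    #     >>> Mapping((1, 0)).inverse.mapping
    #     (1, 0)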
class Identity(Mapping):
"""
Returns inputs unchanged.
This class is useful in compound models when some of the inputs must be
passed unchanged to the next model.
Parameters
----------
n_inputs : int
Specifies the number of inputs this identity model accepts.
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like
Free-form metadata to associate with this model.
Examples
--------
Transform ``(x, y)`` by a shift in x, followed by scaling the two inputs::
>>> from astropy.modeling.models import (Polynomial1D, Shift, Scale,
... Identity)
>>> model = (Shift(1) & Identity(1)) | Scale(1.2) & Scale(2)
    >>> model(1, 1) # doctest: +FLOAT_CMP
(2.4, 2.0)
>>> model.inverse(2.4, 2) # doctest: +FLOAT_CMP
(1.0, 1.0)
"""
linear = True # FittableModel is non-linear by default
def __init__(self, n_inputs, name=None, meta=None):
mapping = tuple(range(n_inputs))
super().__init__(mapping, name=name, meta=meta)
def __repr__(self):
if self.name is None:
return '<Identity({0})>'.format(self.n_inputs)
else:
return '<Identity({0}, name={1})>'.format(self.n_inputs, self.name)
@property
def inverse(self):
"""
The inverse transformation.
In this case of `Identity`, ``self.inverse is self``.
"""
return self
|
084e9d51ade399b0367093c1b87a0beb433d7065fda8aa44c9e1cd026cadfa14 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Implements rotations, including spherical rotations as defined in WCS Paper II
[1]_
`RotateNative2Celestial` and `RotateCelestial2Native` follow the convention in
WCS Paper II to rotate to/from a native sphere and the celestial sphere.
The implementation uses `EulerAngleRotation`. The model parameters are
three angles: the longitude (``lon``) and latitude (``lat``) of the fiducial point
in the celestial system (``CRVAL`` keywords in FITS), and the longitude of the celestial
pole in the native system (``lon_pole``). The Euler angles are ``lon+90``, ``90-lat``
and ``-(lon_pole-90)``.
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
import math
import numpy as np
from .core import Model
from .parameters import Parameter
from ..coordinates.matrix_utilities import rotation_matrix, matrix_product
from .. import units as u
from ..utils.decorators import deprecated
from .utils import _to_radian, _to_orig_unit
__all__ = ['RotateCelestial2Native', 'RotateNative2Celestial', 'Rotation2D',
'EulerAngleRotation']
class _EulerRotation:
"""
Base class which does the actual computation.
"""
_separable = False
def _create_matrix(self, phi, theta, psi, axes_order):
matrices = []
for angle, axis in zip([phi, theta, psi], axes_order):
if isinstance(angle, u.Quantity):
angle = angle.value
angle = np.asscalar(angle)
matrices.append(rotation_matrix(angle, axis, unit=u.rad))
result = matrix_product(*matrices[::-1])
return result
@staticmethod
def spherical2cartesian(alpha, delta):
alpha = np.deg2rad(alpha)
delta = np.deg2rad(delta)
x = np.cos(alpha) * np.cos(delta)
y = np.cos(delta) * np.sin(alpha)
z = np.sin(delta)
return np.array([x, y, z])
@staticmethod
def cartesian2spherical(x, y, z):
h = np.hypot(x, y)
alpha = np.rad2deg(np.arctan2(y, x))
delta = np.rad2deg(np.arctan2(z, h))
return alpha, delta
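    # The two conversions above are mutual inverses on the unit sphere; for
    # example (alpha, delta) = (0, 0) maps to the x unit vector (array
    # formatting aside):
    #
    #     >>> _EulerRotation.spherical2cartesian(0., 0.)  # doctest: +FLOAT_CMP
    #     array([1., 0., 0.])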
@deprecated(2.0)
@staticmethod
def rotation_matrix_from_angle(angle):
"""
Clockwise rotation matrix.
Parameters
----------
angle : float
Rotation angle in radians.
"""
return np.array([[math.cos(angle), math.sin(angle)],
[-math.sin(angle), math.cos(angle)]])
def evaluate(self, alpha, delta, phi, theta, psi, axes_order):
shape = None
if isinstance(alpha, np.ndarray) and alpha.ndim == 2:
alpha = alpha.flatten()
delta = delta.flatten()
shape = alpha.shape
inp = self.spherical2cartesian(alpha, delta)
matrix = self._create_matrix(phi, theta, psi, axes_order)
result = np.dot(matrix, inp)
a, b = self.cartesian2spherical(*result)
if shape is not None:
a.shape = shape
b.shape = shape
return a, b
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
""" Input units. """
return {'alpha': u.deg, 'delta': u.deg}
@property
def return_units(self):
""" Output units. """
return {'alpha': u.deg, 'delta': u.deg}
class EulerAngleRotation(_EulerRotation, Model):
"""
Implements Euler angle intrinsic rotations.
Rotates one coordinate system into another (fixed) coordinate system.
All coordinate systems are right-handed. The sign of the angles is
    determined by the right-hand rule.
Parameters
----------
phi, theta, psi : float or `~astropy.units.Quantity`
"proper" Euler angles in deg.
If floats, they should be in deg.
axes_order : str
A 3 character string, a combination of 'x', 'y' and 'z',
where each character denotes an axis in 3D space.
"""
inputs = ('alpha', 'delta')
outputs = ('alpha', 'delta')
phi = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
theta = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
psi = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
def __init__(self, phi, theta, psi, axes_order, **kwargs):
self.axes = ['x', 'y', 'z']
if len(axes_order) != 3:
raise TypeError(
"Expected axes_order to be a character sequence of length 3,"
"got {0}".format(axes_order))
unrecognized = set(axes_order).difference(self.axes)
if unrecognized:
raise ValueError("Unrecognized axis label {0}; "
"should be one of {1} ".format(unrecognized, self.axes))
self.axes_order = axes_order
qs = [isinstance(par, u.Quantity) for par in [phi, theta, psi]]
if any(qs) and not all(qs):
raise TypeError("All parameters should be of the same type - float or Quantity.")
super().__init__(phi=phi, theta=theta, psi=psi, **kwargs)
    @property
    def inverse(self):
return self.__class__(phi=-self.psi,
theta=-self.theta,
psi=-self.phi,
axes_order=self.axes_order[::-1])
def evaluate(self, alpha, delta, phi, theta, psi):
a, b = super().evaluate(alpha, delta, phi, theta, psi, self.axes_order)
return a, b
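    # A zero rotation is the identity regardless of axes_order, which makes
    # for a quick sanity check (values to float precision):
    #
    #     >>> rot = EulerAngleRotation(0., 0., 0., 'zxz')
    #     >>> rot(10., 20.)  # doctest: +FLOAT_CMP
    #     (10.0, 20.0)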
class _SkyRotation(_EulerRotation, Model):
"""
Base class for RotateNative2Celestial and RotateCelestial2Native.
"""
lon = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
lat = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
lon_pole = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
def __init__(self, lon, lat, lon_pole, **kwargs):
qs = [isinstance(par, u.Quantity) for par in [lon, lat, lon_pole]]
if any(qs) and not all(qs):
raise TypeError("All parameters should be of the same type - float or Quantity.")
super().__init__(lon, lat, lon_pole, **kwargs)
self.axes_order = 'zxz'
def _evaluate(self, phi, theta, lon, lat, lon_pole):
alpha, delta = super().evaluate(phi, theta, lon, lat, lon_pole,
self.axes_order)
mask = alpha < 0
if isinstance(mask, np.ndarray):
alpha[mask] += 360
        elif mask:
            alpha += 360
return alpha, delta
class RotateNative2Celestial(_SkyRotation):
"""
Transform from Native to Celestial Spherical Coordinates.
Parameters
----------
    lon : float or `~astropy.units.Quantity`
        Celestial longitude of the fiducial point.
    lat : float or `~astropy.units.Quantity`
        Celestial latitude of the fiducial point.
    lon_pole : float or `~astropy.units.Quantity`
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be in units of deg.
"""
#: Inputs are angles on the native sphere
inputs = ('phi_N', 'theta_N')
#: Outputs are angles on the celestial sphere
outputs = ('alpha_C', 'delta_C')
@property
def input_units(self):
""" Input units. """
return {'phi_N': u.deg, 'theta_N': u.deg}
@property
def return_units(self):
""" Output units. """
return {'alpha_C': u.deg, 'delta_C': u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
def evaluate(self, phi_N, theta_N, lon, lat, lon_pole):
"""
Parameters
----------
phi_N, theta_N : float (deg) or `~astropy.units.Quantity`
Angles in the Native coordinate system.
lon, lat, lon_pole : float (in deg) or `~astropy.units.Quantity`
Parameter values when the model was initialized.
Returns
-------
alpha_C, delta_C : float (deg) or `~astropy.units.Quantity`
Angles on the Celestial sphere.
"""
# The values are in radians since they have already been through the setter.
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = lon_pole - np.pi / 2
theta = - (np.pi / 2 - lat)
psi = -(np.pi / 2 + lon)
alpha_C, delta_C = super()._evaluate(phi_N, theta_N, phi, theta, psi)
return alpha_C, delta_C
@property
def inverse(self):
# convert to angles on the celestial sphere
return RotateCelestial2Native(self.lon, self.lat, self.lon_pole)
class RotateCelestial2Native(_SkyRotation):
"""
Transform from Celestial to Native Spherical Coordinates.
Parameters
----------
    lon : float or `~astropy.units.Quantity`
        Celestial longitude of the fiducial point.
    lat : float or `~astropy.units.Quantity`
        Celestial latitude of the fiducial point.
    lon_pole : float or `~astropy.units.Quantity`
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be in units of deg.
"""
#: Inputs are angles on the celestial sphere
inputs = ('alpha_C', 'delta_C')
#: Outputs are angles on the native sphere
outputs = ('phi_N', 'theta_N')
@property
def input_units(self):
""" Input units. """
return {'alpha_C': u.deg, 'delta_C': u.deg}
@property
def return_units(self):
""" Output units. """
return {'phi_N': u.deg, 'theta_N': u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
def evaluate(self, alpha_C, delta_C, lon, lat, lon_pole):
"""
Parameters
----------
alpha_C, delta_C : float (deg) or `~astropy.units.Quantity`
Angles in the Celestial coordinate frame.
lon, lat, lon_pole : float (deg) or `~astropy.units.Quantity`
Parameter values when the model was initialized.
Returns
-------
phi_N, theta_N : float (deg) or `~astropy.units.Quantity`
Angles on the Native sphere.
"""
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = (np.pi / 2 + lon)
theta = (np.pi / 2 - lat)
psi = -(lon_pole - np.pi / 2)
phi_N, theta_N = super()._evaluate(alpha_C, delta_C, phi, theta, psi)
return phi_N, theta_N
@property
def inverse(self):
return RotateNative2Celestial(self.lon, self.lat, self.lon_pole)
class Rotation2D(Model):
"""
Perform a 2D rotation given an angle.
Positive angles represent a counter-clockwise rotation and vice-versa.
Parameters
----------
angle : float or `~astropy.units.Quantity`
Angle of rotation (if float it should be in deg).
"""
inputs = ('x', 'y')
outputs = ('x', 'y')
_separable = False
angle = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
@property
def inverse(self):
"""Inverse rotation."""
return self.__class__(angle=-self.angle)
@classmethod
def evaluate(cls, x, y, angle):
"""
Rotate (x, y) about ``angle``.
Parameters
----------
x, y : ndarray-like
Input quantities
angle : float (deg) or `~astropy.units.Quantity`
Angle of rotations.
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
# If one argument has units, enforce they both have units and they are compatible.
x_unit = getattr(x, 'unit', None)
y_unit = getattr(y, 'unit', None)
has_units = x_unit is not None and y_unit is not None
if x_unit != y_unit:
if has_units and y_unit.is_equivalent(x_unit):
y = y.to(x_unit)
y_unit = x_unit
else:
raise u.UnitsError("x and y must have compatible units")
# Note: If the original shape was () (an array scalar) convert to a
# 1-element 1-D array on output for consistency with most other models
orig_shape = x.shape or (1,)
inarr = np.array([x.flatten(), y.flatten()])
if isinstance(angle, u.Quantity):
angle = angle.to_value(u.rad)
result = np.dot(cls._compute_matrix(angle), inarr)
x, y = result[0], result[1]
x.shape = y.shape = orig_shape
if has_units:
return u.Quantity(x, unit=x_unit), u.Quantity(y, unit=y_unit)
else:
return x, y
@staticmethod
def _compute_matrix(angle):
return np.array([[math.cos(angle), -math.sin(angle)],
[math.sin(angle), math.cos(angle)]],
dtype=np.float64)
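# As a quick check of the sign convention, a positive 90-degree rotation
# maps the +x unit vector onto +y (up to floating-point error):
#
#     >>> from astropy.modeling.models import Rotation2D
#     >>> x, y = Rotation2D(angle=90.)(1., 0.)
#     >>> (round(float(x), 12), round(float(y), 12))
#     (0.0, 1.0)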
|
47f338e4f71e228785e626a90735c5641f755756f5111a4c8a47131239aaa5e0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
from collections import OrderedDict
import numpy as np
from .core import (Fittable1DModel, Fittable2DModel,
ModelDefinitionError)
from .parameters import Parameter, InputParameterError
from .utils import ellipse_extent
from ..stats.funcs import gaussian_sigma_to_fwhm
from .. import units as u
from ..units import Quantity, UnitsError
__all__ = ['AiryDisk2D', 'Moffat1D', 'Moffat2D', 'Box1D', 'Box2D', 'Const1D',
'Const2D', 'Ellipse2D', 'Disk2D', 'Gaussian1D',
'Gaussian2D', 'Linear1D', 'Lorentz1D',
'MexicanHat1D', 'MexicanHat2D', 'RedshiftScaleFactor',
'Scale', 'Multiply', 'Sersic1D', 'Sersic2D', 'Shift', 'Sine1D', 'Trapezoid1D',
'TrapezoidDisk2D', 'Ring2D', 'Voigt1D']
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float
Amplitude of the Gaussian.
mean : float
Mean of the Gaussian.
stddev : float
Standard deviation of the Gaussian.
Notes
-----
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(default=1)
mean = Parameter(default=0)
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(default=1, bounds=(FLOAT_EPSILON, None))
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
(-11.0, 11.0)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
(-4.0, 4.0)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * gaussian_sigma_to_fwhm
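    # The relation follows from solving exp(-x**2 / (2 * sigma**2)) = 1/2:
    # FWHM = 2 * sqrt(2 * ln 2) * sigma, approximately 2.3548 * sigma.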
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(- 0.5 * (x - mean) ** 2 / stddev ** 2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.unit is None:
return None
else:
return {'x': self.mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('mean', inputs_unit['x']),
('stddev', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float
Amplitude of the Gaussian.
x_mean : float
Mean of the Gaussian in x.
y_mean : float
Mean of the Gaussian in y.
x_stddev : float or None
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or None
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float, optional
Rotation angle in radians. The rotation angle increases
counterclockwise. Must be None if a covariance matrix (``cov_matrix``)
is provided. If no ``cov_matrix`` is given, ``None`` means the default
value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
amplitude = Parameter(default=1)
x_mean = Parameter(default=0)
y_mean = Parameter(default=0)
x_stddev = Parameter(default=1)
y_stddev = Parameter(default=1)
theta = Parameter(default=0.0)
def __init__(self, amplitude=amplitude.default, x_mean=x_mean.default,
y_mean=y_mean.default, x_stddev=None, y_stddev=None,
theta=None, cov_matrix=None, **kwargs):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError("Cannot specify both cov_matrix and "
"x/y_stddev/theta")
else:
            # Compute principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
# TODO: Maybe it should be possible for the covariance matrix
# to be some (x, y, ..., z, 2, 2) array to be broadcast with
# other parameters of shape (x, y, ..., z)
# But that's maybe a special case to work out if/when needed
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault('bounds', {})
kwargs['bounds'].setdefault('x_stddev', (FLOAT_EPSILON, None))
kwargs['bounds'].setdefault('y_stddev', (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **kwargs)
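    # For example, a diagonal covariance matrix recovers the axis-aligned
    # standard deviations and a zero rotation angle:
    #
    #     >>> g = Gaussian2D(cov_matrix=[[4., 0.], [0., 1.]])
    #     >>> g.x_stddev.value, g.y_stddev.value, g.theta.value
    #     (2.0, 1.0, 0.0)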
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * gaussian_sigma_to_fwhm
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * gaussian_sigma_to_fwhm
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
((-11.0, 11.0), (-5.5, 5.5))
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
((-4.0, 4.0), (-2.0, 2.0))
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx))
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function"""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(-((a * xdiff ** 2) + (b * xdiff * ydiff) +
(c * ydiff ** 2)))
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters"""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2. * theta)
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xstd3 = x_stddev ** 3
ystd3 = y_stddev ** 3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff ** 2
ydiff2 = ydiff ** 2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) +
(c * ydiff2)))
da_dtheta = (sint * cost * ((1. / ystd2) - (1. / xstd2)))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2. * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2. * c * ydiff))
dg_dx_stddev = g * (-(da_dx_stddev * xdiff2 +
db_dx_stddev * xdiff * ydiff +
dc_dx_stddev * ydiff2))
dg_dy_stddev = g * (-(da_dy_stddev * xdiff2 +
db_dy_stddev * xdiff * ydiff +
dc_dy_stddev * ydiff2))
dg_dtheta = g * (-(da_dtheta * xdiff2 +
db_dtheta * xdiff * ydiff +
dc_dtheta * ydiff2))
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev,
dg_dtheta]
@property
def input_units(self):
if self.x_mean.unit is None and self.y_mean.unit is None:
return None
else:
return {'x': self.x_mean.unit,
'y': self.y_mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_mean', inputs_unit['x']),
('y_mean', inputs_unit['x']),
('x_stddev', inputs_unit['x']),
('y_stddev', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
"""
inputs = ('x',)
outputs = ('x',)
offset = Parameter(default=0)
linear = True
@property
def input_units(self):
if self.offset.unit is None:
return None
else:
return {'x': self.offset.unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function"""
inv = self.copy()
inv.offset *= -1
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function"""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model"""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter"""
d_offset = np.ones_like(x)
return [d_offset]
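    # For example, shifting and un-shifting a coordinate round-trips:
    #
    #     >>> from astropy.modeling.models import Shift
    #     >>> Shift(2.)(3.)
    #     5.0
    #     >>> Shift(2.).inverse(5.)
    #     3.0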
class Scale(Fittable1DModel):
"""
Multiply a model by a dimensionless factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
Notes
-----
If ``factor`` is a `~astropy.units.Quantity` then the units will be
stripped before the scaling operation.
"""
inputs = ('x',)
outputs = ('x',)
factor = Parameter(default=1)
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
if self.factor.unit is None:
return None
else:
return {'x': self.factor.unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function"""
inv = self.copy()
inv.factor = 1 / self.factor
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function"""
if isinstance(factor, u.Quantity):
factor = factor.value
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter"""
d_factor = x
return [d_factor]
class Multiply(Fittable1DModel):
"""
Multiply a model by a quantity or number.
Parameters
----------
factor : float
Factor by which to multiply a coordinate.
"""
inputs = ('x',)
outputs = ('x',)
factor = Parameter(default=1)
linear = True
fittable = True
@property
def inverse(self):
"""One dimensional inverse multiply model function"""
inv = self.copy()
inv.factor = 1 / self.factor
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional multiply model function"""
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional multiply model derivative with respect to parameter"""
d_factor = x
return [d_factor]
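    # Unlike Scale, Multiply keeps the factor's unit, so it can attach a
    # physical unit to its input (a minimal sketch; repr formatting aside):
    #
    #     >>> from astropy.modeling.models import Multiply
    #     >>> from astropy import units as u
    #     >>> Multiply(2. * u.m)(3.)  # doctest: +FLOAT_CMP
    #     <Quantity 6. m>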
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
"""
z = Parameter(description='redshift', default=0)
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function"""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative"""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model"""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
return inv
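    # The inverse redshift z' satisfies (1 + z') * (1 + z) = 1, so applying
    # the model and its inverse round-trips:
    #
    #     >>> m = RedshiftScaleFactor(z=1.)
    #     >>> m.inverse(m(4.))  # doctest: +FLOAT_CMP
    #     4.0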
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
plt.text(.25, 300, 'n=10')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1)
r_eff = Parameter(default=1)
n = Parameter(default=4)
_gammaincinv = None
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
            except ImportError:
raise ImportError('Sersic1D model requires scipy > 0.11.')
return (amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)))
@property
def input_units(self):
if self.r_eff.unit is None:
return None
else:
return {'x': self.r_eff.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('r_eff', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Sine1D(Fittable1DModel):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
amplitude = Parameter(default=1)
frequency = Parameter(default=1)
phase = Parameter(default=0)
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
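    # For example, with frequency 0.25 the model peaks at x = 1, where the
    # argument is 2 * pi * 0.25 = pi / 2:
    #
    #     >>> Sine1D(amplitude=1., frequency=0.25)(1.)  # doctest: +FLOAT_CMP
    #     1.0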
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative"""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (TWOPI * x * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
d_phase = (TWOPI * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def input_units(self):
if self.frequency.unit is None:
return None
else:
return {'x': 1. / self.frequency.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('frequency', inputs_unit['x'] ** -1),
('amplitude', outputs_unit['y'])])
class Linear1D(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
"""
slope = Parameter(default=1)
intercept = Parameter(default=0)
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function"""
return slope * x + intercept
@staticmethod
def fit_deriv(x, slope, intercept):
"""One dimensional Line model derivative with respect to parameters"""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
new_slope = self.slope ** -1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
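    # For example, the inverse of y = 2 x + 1 is x = 0.5 y - 0.5:
    #
    #     >>> line = Linear1D(slope=2., intercept=1.)
    #     >>> line.inverse.slope.value, line.inverse.intercept.value
    #     (0.5, -0.5)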
@property
def input_units(self):
if self.intercept.unit is None and self.slope.unit is None:
return None
else:
return {'x': self.intercept.unit / self.slope.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('intercept', outputs_unit['y']),
('slope', outputs_unit['y'] / inputs_unit['x'])])
class Planar2D(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the straight line in X
slope_y : float
Slope of the straight line in Y
intercept : float
Z-intercept of the straight line
See Also
--------
Linear1D, Polynomial2D
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
"""
slope_x = Parameter(default=1)
slope_y = Parameter(default=1)
intercept = Parameter(default=0)
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function"""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model derivative with respect to parameters"""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
class Lorentz1D(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float
Peak value
x_0 : float
Position of the peak
fwhm : float
Full width at half maximum
See Also
--------
Gaussian1D, Box1D, MexicanHat1D
Notes
-----
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
fwhm = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function"""
return (amplitude * ((fwhm / 2.) ** 2) / ((x - x_0) ** 2 +
(fwhm / 2.) ** 2))
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters"""
d_amplitude = fwhm ** 2 / (fwhm ** 2 + (x - x_0) ** 2)
d_x_0 = (amplitude * d_amplitude * (2 * x - 2 * x_0) /
(fwhm ** 2 + (x - x_0) ** 2))
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
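    # Note: the fraction of a Lorentzian's area within +/- factor * fwhm of
    # the peak is (2 / pi) * arctan(2 * factor); factor = 25 gives ~0.987,
    # which is where the "99%" figure above comes from.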
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('fwhm', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Voigt1D(Fittable1DModel):
"""
One dimensional model for the Voigt profile.
Parameters
----------
x_0 : float
Position of the peak
amplitude_L : float
The Lorentzian amplitude
fwhm_L : float
The Lorentzian full width at half maximum
fwhm_G : float
The Gaussian full width at half maximum
See Also
--------
Gaussian1D, Lorentz1D
Notes
-----
Algorithm for the computation taken from
McLean, A. B., Mitchell, C. E. J. & Swanston, D. M. Implementation of an
efficient analytical approximation to the Voigt function for photoemission
lineshape analysis. Journal of Electron Spectroscopy and Related Phenomena
69, 125-132 (1994)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Voigt1D
import matplotlib.pyplot as plt
plt.figure()
x = np.arange(0, 10, 0.01)
v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
plt.plot(x, v1(x))
plt.show()
"""
x_0 = Parameter(default=0)
amplitude_L = Parameter(default=1)
fwhm_L = Parameter(default=2/np.pi)
fwhm_G = Parameter(default=np.log(2))
_abcd = np.array([
[-1.2150, -1.3509, -1.2150, -1.3509], # A
[1.2359, 0.3786, -1.2359, -0.3786], # B
[-0.3085, 0.5906, -0.3085, 0.5906], # C
[0.0210, -1.1858, -0.0210, 1.1858]]) # D
@classmethod
def evaluate(cls, x, x_0, amplitude_L, fwhm_L, fwhm_G):
A, B, C, D = cls._abcd
sqrt_ln2 = np.sqrt(np.log(2))
X = (x - x_0) * 2 * sqrt_ln2 / fwhm_G
X = np.atleast_1d(X)[..., np.newaxis]
Y = fwhm_L * sqrt_ln2 / fwhm_G
Y = np.atleast_1d(Y)[..., np.newaxis]
V = np.sum((C * (Y - A) + D * (X - B))/(((Y - A) ** 2 + (X - B) ** 2)), axis=-1)
return (fwhm_L * amplitude_L * np.sqrt(np.pi) * sqrt_ln2 / fwhm_G) * V
@classmethod
def fit_deriv(cls, x, x_0, amplitude_L, fwhm_L, fwhm_G):
A, B, C, D = cls._abcd
sqrt_ln2 = np.sqrt(np.log(2))
X = (x - x_0) * 2 * sqrt_ln2 / fwhm_G
X = np.atleast_1d(X)[:, np.newaxis]
Y = fwhm_L * sqrt_ln2 / fwhm_G
Y = np.atleast_1d(Y)[:, np.newaxis]
constant = fwhm_L * amplitude_L * np.sqrt(np.pi) * sqrt_ln2 / fwhm_G
alpha = C * (Y - A) + D * (X - B)
beta = (Y - A) ** 2 + (X - B) ** 2
V = np.sum((alpha / beta), axis=-1)
dVdx = np.sum((D/beta - 2 * (X - B) * alpha / np.square(beta)), axis=-1)
dVdy = np.sum((C/beta - 2 * (Y - A) * alpha / np.square(beta)), axis=-1)
dyda = [-constant * dVdx * 2 * sqrt_ln2 / fwhm_G,
constant * V / amplitude_L,
constant * (V / fwhm_L + dVdy * sqrt_ln2 / fwhm_G),
-constant * (V + (sqrt_ln2 / fwhm_G) * (2 * (x - x_0) * dVdx + fwhm_L * dVdy)) / fwhm_G]
return dyda
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('fwhm_L', inputs_unit['x']),
('fwhm_G', inputs_unit['x']),
('amplitude_L', outputs_unit['y'])])
class Const1D(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
else:
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters"""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('amplitude', outputs_unit['y'])])
class Const2D(Fittable2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
"""
amplitude = Parameter(default=1)
linear = True
@staticmethod
def evaluate(x, y, amplitude):
"""Two dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
else:
return x
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('amplitude', outputs_unit['z'])])
class Ellipse2D(Fittable2DModel):
"""
A 2D Ellipse model.
Parameters
----------
amplitude : float
Value of the ellipse.
x_0 : float
x position of the center of the disk.
y_0 : float
y position of the center of the disk.
a : float
The length of the semimajor axis.
b : float
The length of the semiminor axis.
theta : float
The rotation angle in radians of the semimajor axis. The
rotation angle increases counterclockwise from the positive x
axis.
See Also
--------
Disk2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
\\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos
\\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 +
\\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0)
\\cos \\theta}{b}\\right]^2 \\leq 1 \\\\
0 & : \\mathrm{otherwise}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.coordinates import Angle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x0, y0 = 25, 25
a, b = 20, 10
theta = Angle(30, 'deg')
e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b,
theta=theta.radian)
y, x = np.mgrid[0:50, 0:50]
fig, ax = plt.subplots(1, 1)
ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r')
e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, theta.degree, edgecolor='red',
facecolor='none')
ax.add_patch(e2)
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
a = Parameter(default=1)
b = Parameter(default=1)
theta = Parameter(default=0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, a, b, theta):
"""Two dimensional Ellipse model function."""
xx = x - x_0
yy = y - y_0
cost = np.cos(theta)
sint = np.sin(theta)
numerator1 = (xx * cost) + (yy * sint)
numerator2 = -(xx * sint) + (yy * cost)
in_ellipse = (((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.)
result = np.select([in_ellipse], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
a = self.a
b = self.b
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('a', inputs_unit['x']),
('b', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
class Disk2D(Fittable2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
R_0 = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function"""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
result = np.select([rr <= R_0 ** 2], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
return ((self.y_0 - self.R_0, self.y_0 + self.R_0),
(self.x_0 - self.R_0, self.x_0 + self.R_0))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('R_0', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Ring2D(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer Radius of the ring. Can be specified instead of width.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
r_in = Parameter(default=1)
width = Parameter(default=1)
def __init__(self, amplitude=amplitude.default, x_0=x_0.default,
y_0=y_0.default, r_in=r_in.default, width=width.default,
r_out=None, **kwargs):
# If outer radius explicitly given, it overrides default width.
if r_out is not None:
if width != self.width.default:
raise InputParameterError(
"Cannot specify both width and outer radius separately.")
width = r_out - r_in
elif width is None:
width = self.width.default
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width,
**kwargs)
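    # For example, giving the outer radius fixes the width implicitly:
    #
    #     >>> Ring2D(r_in=2., r_out=5.).width.value
    #     3.0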
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in ** 2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('r_in', inputs_unit['x']),
('width', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Delta1D(Fittable1DModel):
"""One dimensional Dirac delta function."""
def __init__(self):
raise ModelDefinitionError("Not implemented")
class Delta2D(Fittable2DModel):
"""Two dimensional Dirac delta function."""
def __init__(self):
raise ModelDefinitionError("Not implemented")
class Box1D(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
width = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function"""
inside = np.logical_and(x >= x_0 - width / 2., x <= x_0 + width / 2.)
result = np.select([inside], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('width', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Box2D(Fittable2DModel):
"""
Two dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
x position of the center of the box function
x_width : float
Width in x direction of the box
y_0 : float
y position of the center of the box function
y_width : float
Width in y direction of the box
See Also
--------
Box1D, Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\
& y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\
0 : & \\text{else}
\\end{array}
\\right.
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
x_width = Parameter(default=1)
y_width = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
"""Two dimensional Box model function"""
x_range = np.logical_and(x >= x_0 - x_width / 2.,
x <= x_0 + x_width / 2.)
y_range = np.logical_and(y >= y_0 - y_width / 2.,
y <= y_0 + y_width / 2.)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dx = self.x_width / 2
dy = self.y_width / 2
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['y']),
('x_width', inputs_unit['x']),
('y_width', inputs_unit['y']),
('amplitude', outputs_unit['z'])])
class Trapezoid1D(Fittable1DModel):
"""
One dimensional Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
Center position of the trapezoid
width : float
Width of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid
See Also
--------
Box1D, Gaussian1D, Moffat1D
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Trapezoid1D
plt.figure()
s1 = Trapezoid1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
width = Parameter(default=1)
slope = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, width, slope):
"""One dimensional Trapezoid model function"""
# Compute the four points where the trapezoid changes slope
# x1 <= x2 <= x3 <= x4
x2 = x_0 - width / 2.
x3 = x_0 + width / 2.
x1 = x2 - amplitude / slope
x4 = x3 + amplitude / slope
# Compute model values in pieces between the change points
range_a = np.logical_and(x >= x1, x < x2)
range_b = np.logical_and(x >= x2, x < x3)
range_c = np.logical_and(x >= x3, x < x4)
val_a = slope * (x - x1)
val_b = amplitude
val_c = slope * (x4 - x)
result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``(x_low, x_high)``
"""
dx = self.width / 2 + self.amplitude / self.slope
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('width', inputs_unit['x']),
('slope', outputs_unit['y'] / inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class TrapezoidDisk2D(Fittable2DModel):
"""
Two dimensional circular Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
x position of the center of the trapezoid
y_0 : float
y position of the center of the trapezoid
R_0 : float
Radius of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid in x direction.
See Also
--------
Disk2D, Box2D
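Examples
--------
A minimal usage sketch (parameter values are illustrative)::

    import numpy as np
    from astropy.modeling.models import TrapezoidDisk2D

    x, y = np.meshgrid(np.linspace(-3, 3, 7), np.linspace(-3, 3, 7))
    mod = TrapezoidDisk2D(amplitude=1, x_0=0, y_0=0, R_0=1, slope=0.5)
    img = mod(x, y)  # flat top of height 1 for r <= 1, reaching 0 at r = 3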
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
R_0 = Parameter(default=1)
slope = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
"""Two dimensional Trapezoid Disk model function"""
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
range_1 = r <= R_0
range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
val_1 = amplitude
val_2 = amplitude + slope * (R_0 - r)
result = np.select([range_1, range_2], [val_1, val_2])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.R_0 + self.amplitude / self.slope
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('R_0', inputs_unit['x']),
('slope', outputs_unit['z'] / inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class MexicanHat1D(Fittable1DModel):
"""
One dimensional Mexican Hat model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Mexican hat
See Also
--------
MexicanHat2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import MexicanHat1D
plt.figure()
s1 = MexicanHat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.sigma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
sigma = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Mexican Hat model function"""
xx_ww = (x - x_0) ** 2 / (2 * sigma ** 2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('sigma', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class MexicanHat2D(Fittable2DModel):
"""
Two dimensional symmetric Mexican Hat model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Mexican hat
See Also
--------
MexicanHat1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
+ \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
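Examples
--------
A minimal usage sketch (parameter values are illustrative)::

    import numpy as np
    from astropy.modeling.models import MexicanHat2D

    x, y = np.meshgrid(np.linspace(-4, 4, 9), np.linspace(-4, 4, 9))
    mod = MexicanHat2D(amplitude=1, x_0=0, y_0=0, sigma=1)
    img = mod(x, y)  # peak of 1 at the center, negative ring around it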
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
sigma = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Mexican Hat model function"""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma ** 2)
return amplitude * (1 - rr_ww) * np.exp(- rr_ww)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('sigma', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class AiryDisk2D(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
is the input ``radius`` parameter, and :math:`R_z =
1.2196698912665045`.
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
See [1]_ for more details about the Airy disk.
References
----------
.. [1] https://en.wikipedia.org/wiki/Airy_disk
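Examples
--------
A minimal usage sketch (requires scipy; parameter values are
illustrative)::

    import numpy as np
    from astropy.modeling.models import AiryDisk2D

    x, y = np.meshgrid(np.linspace(-2, 2, 5), np.linspace(-2, 2, 5))
    mod = AiryDisk2D(amplitude=1, x_0=0, y_0=0, radius=1)
    img = mod(x, y)  # 1 at the center, 0 at the first zero (r = radius)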
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
radius = Parameter(default=1)
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function"""
if cls._rz is None:
try:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
except (ValueError, ImportError):
raise ImportError('AiryDisk2D model requires scipy > 0.11.')
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('radius', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Moffat1D(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.gamma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
gamma = Parameter(default=1)
alpha = Parameter(default=1)
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <http://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
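For the default parameters (``gamma = alpha = 1``) this evaluates to 2.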
"""
return 2.0 * self.gamma * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function"""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters"""
d_A = (1 + (x - x_0) ** 2 / gamma ** 2) ** (-alpha)
d_x_0 = (-amplitude * alpha * d_A * (-2 * x + 2 * x_0) /
(gamma ** 2 * d_A ** alpha))
d_gamma = (2 * amplitude * alpha * d_A * (x - x_0) ** 2 /
(gamma ** 3 * d_A ** alpha))
d_alpha = -amplitude * d_A * np.log(1 + (x - x_0) ** 2 / gamma ** 2)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('gamma', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Moffat2D(Fittable2DModel):
"""
Two dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
y_0 : float
y position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} +
\\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
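Examples
--------
A minimal usage sketch (parameter values are illustrative)::

    import numpy as np
    from astropy.modeling.models import Moffat2D

    x, y = np.meshgrid(np.linspace(-3, 3, 7), np.linspace(-3, 3, 7))
    mod = Moffat2D(amplitude=1, x_0=0, y_0=0, gamma=1, alpha=2)
    img = mod(x, y)  # peaks at 1 in the center with power-law wings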
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
gamma = Parameter(default=1)
alpha = Parameter(default=1)
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <http://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * self.gamma * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model function"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
return amplitude * (1 + rr_gg) ** (-alpha)
@staticmethod
def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model derivative with respect to parameters"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
d_A = (1 + rr_gg) ** (-alpha)
d_x_0 = (-amplitude * alpha * d_A * (-2 * x + 2 * x_0) /
(gamma ** 2 * (1 + rr_gg)))
d_y_0 = (-amplitude * alpha * d_A * (-2 * y + 2 * y_0) /
(gamma ** 2 * (1 + rr_gg)))
d_alpha = -amplitude * d_A * np.log(1 + rr_gg)
d_gamma = 2 * amplitude * alpha * d_A * (rr_gg / (gamma * (1 + rr_gg)))
return [d_A, d_x_0, d_y_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('gamma', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Sersic2D(Fittable2DModel):
r"""
Two dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
x_0 : float, optional
x position of the center.
y_0 : float, optional
y position of the center.
ellip : float, optional
Ellipticity.
theta : float, optional
Rotation angle in radians, counterclockwise from
the positive x-axis.
See Also
--------
Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
I(x,y) = I(r) = I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
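For example, :math:`n = 4` gives :math:`b_n \approx 7.67`; a useful
approximation is :math:`b_n \approx 2n - 1/3`.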
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic2D
import matplotlib.pyplot as plt
x,y = np.meshgrid(np.arange(100), np.arange(100))
mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50,
ellip=.5, theta=-1)
img = mod(x, y)
log_img = np.log10(img)
plt.figure()
plt.imshow(log_img, origin='lower', interpolation='nearest',
vmin=-1, vmax=2)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.set_label('Log Brightness', rotation=270, labelpad=25)
cbar.set_ticks([-1, 0, 1, 2], update_ticks=True)
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1)
r_eff = Parameter(default=1)
n = Parameter(default=4)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
ellip = Parameter(default=0)
theta = Parameter(default=0)
_gammaincinv = None
@classmethod
def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
"""Two dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
except (ValueError, ImportError):
raise ImportError('Sersic2D model requires scipy > 0.11.')
bn = cls._gammaincinv(2. * n, 0.5)
a, b = r_eff, (1 - ellip) * r_eff
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('r_eff', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
|
723d99a0bae88e4361368b05e41b137748361067c99a05c3970c71bacb04a707 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tabular models.
Tabular models of any dimension can be created using `tabular_model`.
For convenience `Tabular1D` and `Tabular2D` are provided.
Examples
--------
>>> import numpy as np
>>> from astropy.modeling.models import Tabular2D
>>> table = np.array([[ 3., 0., 0.],
... [ 0., 2., 0.],
... [ 0., 0., 0.]])
>>> points = ([1, 2, 3], [1, 2, 3])
>>> t2 = Tabular2D(points, lookup_table=table, bounds_error=False,
... fill_value=None, method='nearest')
"""
import abc
import numpy as np
from .core import Model
from .. import units as u
from ..utils import minversion
try:
import scipy
from scipy.interpolate import interpn
has_scipy = True
except ImportError:
has_scipy = False
has_scipy = has_scipy and minversion(scipy, "0.14")
__all__ = ['tabular_model', 'Tabular1D', 'Tabular2D']
__doctest_requires__ = {('tabular_model'): ['scipy']}
class _Tabular(Model):
"""
Returns an interpolated lookup table value.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, ), optional
The points defining the regular grid in n dimensions.
lookup_table : array-like, shape (m1, ..., mn, ...)
The data on a regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear",
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then ``fill_value`` is used.
fill_value : float or `~astropy.units.Quantity`, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d". If Quantity is given, it will be converted to the unit of
``lookup_table``, if applicable.
Returns
-------
value : ndarray
Interpolated values at input coordinates.
Raises
------
ImportError
Scipy is not installed.
Notes
-----
Uses `scipy.interpolate.interpn`.
"""
linear = False
fittable = False
standard_broadcasting = False
outputs = ('y',)
@property
@abc.abstractmethod
def lookup_table(self):
pass
_is_dynamic = True
_id = 0
def __init__(self, points=None, lookup_table=None, method='linear',
bounds_error=True, fill_value=np.nan, **kwargs):
n_models = kwargs.get('n_models', 1)
if n_models > 1:
raise NotImplementedError('Only n_models=1 is supported.')
super().__init__(**kwargs)
if lookup_table is None:
raise ValueError('Must provide a lookup table.')
if not isinstance(lookup_table, u.Quantity):
lookup_table = np.asarray(lookup_table)
if self.lookup_table.ndim != lookup_table.ndim:
raise ValueError("lookup_table should be an array with "
"{0} dimensions.".format(self.lookup_table.ndim))
if points is None:
points = tuple(np.arange(x, dtype=float)
for x in lookup_table.shape)
else:
if lookup_table.ndim == 1 and not isinstance(points, tuple):
points = (points,)
npts = len(points)
if npts != lookup_table.ndim:
raise ValueError(
"Expected grid points in "
"{0} directions, got {1}.".format(lookup_table.ndim, npts))
if (npts > 1 and isinstance(points[0], u.Quantity) and
len(set([getattr(p, 'unit', None) for p in points])) > 1):
raise ValueError('points must all have the same unit.')
if isinstance(fill_value, u.Quantity):
if not isinstance(lookup_table, u.Quantity):
raise ValueError('fill value is in {0} but expected to be '
'unitless.'.format(fill_value.unit))
fill_value = fill_value.to(lookup_table.unit).value
self.points = points
self.lookup_table = lookup_table
self.bounds_error = bounds_error
self.method = method
self.fill_value = fill_value
def __repr__(self):
fmt = "<{0}(points={1}, lookup_table={2})>".format(
self.__class__.__name__, self.points, self.lookup_table)
return fmt
def __str__(self):
default_keywords = [
('Model', self.__class__.__name__),
('Name', self.name),
('Inputs', self.inputs),
('Outputs', self.outputs),
('Parameters', ""),
(' points', self.points),
(' lookup_table', self.lookup_table),
(' method', self.method),
(' fill_value', self.fill_value),
(' bounds_error', self.bounds_error)
]
parts = ['{0}: {1}'.format(keyword, value)
for keyword, value in default_keywords
if value is not None]
return '\n'.join(parts)
@property
def input_units(self):
pts = self.points[0]
if not isinstance(pts, u.Quantity):
return None
else:
return dict([(x, pts.unit) for x in self.inputs])
@property
def return_units(self):
if not isinstance(self.lookup_table, u.Quantity):
return None
else:
return {'y': self.lookup_table.unit}
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(points_low, points_high)``.
Examples
--------
>>> from astropy.modeling.models import Tabular1D, Tabular2D
>>> t1 = Tabular1D(points=[1, 2, 3], lookup_table=[10, 20, 30])
>>> t1.bounding_box
(1, 3)
>>> t2 = Tabular2D(points=[[1, 2, 3], [2, 3, 4]],
... lookup_table=[[10, 20, 30], [20, 30, 40]])
>>> t2.bounding_box
((2, 4), (1, 3))
"""
bbox = [(min(p), max(p)) for p in self.points][::-1]
if len(bbox) == 1:
bbox = bbox[0]
return tuple(bbox)
def evaluate(self, *inputs):
"""
Return the interpolated values at the input coordinates.
Parameters
----------
inputs : list of scalars or ndarrays
Input coordinates. The number of inputs must be equal
to the dimensions of the lookup table.
"""
# Strip units from any Quantity inputs, since `interpn` cannot handle
# them (unit conversion is handled upstream by the model call machinery).
inputs = [inp.value if isinstance(inp, u.Quantity) else inp
          for inp in inputs]
shape = inputs[0].shape
inputs = [inp.flatten() for inp in inputs[: self.n_inputs]]
inputs = np.array(inputs).T
if not has_scipy: # pragma: no cover
raise ImportError("This model requires scipy >= v0.14")
result = interpn(self.points, self.lookup_table, inputs,
method=self.method, bounds_error=self.bounds_error,
fill_value=self.fill_value)
# return_units not respected when points has no units
if (isinstance(self.lookup_table, u.Quantity) and
not isinstance(self.points[0], u.Quantity)):
result = result * self.lookup_table.unit
if self.n_outputs == 1:
result = result.reshape(shape)
else:
result = [r.reshape(shape) for r in result]
return result
def tabular_model(dim, name=None):
"""
Make a ``Tabular`` model where ``n_inputs`` is
based on the dimension of the lookup_table.
The returned model class has to be instantiated; when evaluated it
returns the interpolated values.
Parameters
----------
dim : int
Dimensions of the lookup table.
name : str
Name for the class.
Examples
--------
>>> import numpy as np
>>> from astropy.modeling.models import tabular_model
>>> table = np.array([[3., 0., 0.],
... [0., 2., 0.],
... [0., 0., 0.]])
>>> tab = tabular_model(2, name='Tabular2D')
>>> print(tab)
<class 'abc.Tabular2D'>
Name: Tabular2D
Inputs: ('x0', 'x1')
Outputs: ('y',)
>>> points = ([1, 2, 3], [1, 2, 3])
Setting ``fill_value`` to None allows extrapolation.
>>> m = tab(points, lookup_table=table, name='my_table',
... bounds_error=False, fill_value=None, method='nearest')
>>> xinterp = [0, 1, 1.5, 2.72, 3.14]
>>> m(xinterp, xinterp) # doctest: +FLOAT_CMP
array([3., 3., 3., 0., 0.])
"""
if dim < 1:
raise ValueError('Lookup table must have at least one dimension.')
table = np.zeros([2] * dim)
inputs = tuple('x{0}'.format(idx) for idx in range(table.ndim))
members = {'lookup_table': table, 'inputs': inputs}
if dim == 1:
members['_separable'] = True
else:
members['_separable'] = False
if name is None:
model_id = _Tabular._id
_Tabular._id += 1
name = 'Tabular{0}'.format(model_id)
return type(str(name), (_Tabular,), members)
Tabular1D = tabular_model(1, name='Tabular1D')
Tabular2D = tabular_model(2, name='Tabular2D')
_tab_docs = """
method : str, optional
The method of interpolation to perform. Supported are "linear",
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then ``fill_value`` is used.
fill_value : float, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
value : ndarray
Interpolated values at input coordinates.
Raises
------
ImportError
Scipy is not installed.
Notes
-----
Uses `scipy.interpolate.interpn`.
"""
Tabular1D.__doc__ = """
Tabular model in 1D.
Returns an interpolated lookup table value.
Parameters
----------
points : array-like of float, with shape (m1,)
    The points defining the regular grid in one dimension.
lookup_table : array-like, with shape (m1,)
    The data in one dimension.
""" + _tab_docs
Tabular2D.__doc__ = """
Tabular model in 2D.
Returns an interpolated lookup table value.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1,), (m2,), optional
The points defining the regular grid in n dimensions.
lookup_table : array-like, shape (m1, m2)
The data on a regular grid in 2 dimensions.
""" + _tab_docs
|
bd88bab25b9ff0131ce945032322147ccf28167543c6bde38011685b01829431 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Sundry function and class decorators."""
import functools
import inspect
import textwrap
import types
import warnings
from inspect import signature
from .codegen import make_function_with_signature
from .exceptions import (AstropyDeprecationWarning, AstropyUserWarning,
AstropyPendingDeprecationWarning)
__all__ = ['classproperty', 'deprecated', 'deprecated_attribute',
'deprecated_renamed_argument', 'format_doc',
'lazyproperty', 'sharedmethod', 'wraps']
_NotFound = object()
def deprecated(since, message='', name='', alternative='', pending=False,
obj_type=None):
"""
Used to mark a function or class as deprecated.
To mark an attribute as deprecated, use `deprecated_attribute`.
Parameters
----------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier ``func`` may be used for the name of the function,
and ``alternative`` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. ``obj_type`` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function or class; if not provided
the name is automatically determined from the passed in
function or class, though this is useful in the case of
renamed functions, where the new function is just assigned to
the name of the deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function or class name that the user may use in
place of the deprecated object. The deprecation warning will
tell the user about this alternative if provided.
pending : bool, optional
If True, uses an AstropyPendingDeprecationWarning instead of an
AstropyDeprecationWarning.
obj_type : str, optional
The type of this object, if the automatically determined one
needs to be overridden.
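Examples
--------
A minimal usage sketch (the names and the warning text are
illustrative)::

    from astropy.utils.decorators import deprecated

    @deprecated('1.0', alternative='new_shiny')
    def old_shiny():
        pass

    old_shiny()  # emits an AstropyDeprecationWarning when called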
"""
method_types = (classmethod, staticmethod, types.MethodType)
def deprecate_doc(old_doc, message):
"""
Returns a given docstring with a deprecation message prepended
to it.
"""
if not old_doc:
old_doc = ''
old_doc = textwrap.dedent(old_doc).strip('\n')
new_doc = (('\n.. deprecated:: {since}'
'\n {message}\n\n'.format(
**{'since': since, 'message': message.strip()})) + old_doc)
if not old_doc:
# This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
return new_doc
def get_function(func):
"""
Given a function or classmethod (or other function wrapper type), get
the function object.
"""
if isinstance(func, method_types):
func = func.__func__
return func
def deprecate_function(func, message):
"""
Returns a wrapped function that displays an
``AstropyDeprecationWarning`` when it is called.
"""
if isinstance(func, method_types):
func_wrapper = type(func)
else:
func_wrapper = lambda f: f
func = get_function(func)
def deprecated_func(*args, **kwargs):
if pending:
category = AstropyPendingDeprecationWarning
else:
category = AstropyDeprecationWarning
warnings.warn(message, category, stacklevel=2)
return func(*args, **kwargs)
# If this is an extension function, we can't call
# functools.wraps on it, but we normally don't care.
# This crazy way to get the type of a wrapper descriptor is
# straight out of the Python 3.3 inspect module docs.
if type(func) is not type(str.__dict__['__add__']): # nopep8
deprecated_func = functools.wraps(func)(deprecated_func)
deprecated_func.__doc__ = deprecate_doc(
deprecated_func.__doc__, message)
return func_wrapper(deprecated_func)
def deprecate_class(cls, message):
"""
Update the docstring and wrap the ``__init__`` in-place (or ``__new__``
if the class or any of the bases overrides ``__new__``) so it will give
a deprecation warning when an instance is created.
This won't work for extension classes because these can't be modified
in-place and the alternatives don't work in the general case:
- Using a new class that looks and behaves like the original doesn't
work because the __new__ method of extension types usually makes sure
that it's the same class or a subclass.
- Subclassing the class and return the subclass can lead to problems
with pickle and will look weird in the Sphinx docs.
"""
cls.__doc__ = deprecate_doc(cls.__doc__, message)
if cls.__new__ is object.__new__:
cls.__init__ = deprecate_function(get_function(cls.__init__), message)
else:
cls.__new__ = deprecate_function(get_function(cls.__new__), message)
return cls
def deprecate(obj, message=message, name=name, alternative=alternative,
pending=pending):
if obj_type is None:
if isinstance(obj, type):
obj_type_name = 'class'
elif inspect.isfunction(obj):
obj_type_name = 'function'
elif inspect.ismethod(obj) or isinstance(obj, method_types):
obj_type_name = 'method'
else:
obj_type_name = 'object'
else:
obj_type_name = obj_type
if not name:
name = get_function(obj).__name__
altmessage = ''
if not message or type(message) is type(deprecate):
if pending:
message = ('The {func} {obj_type} will be deprecated in a '
'future version.')
else:
message = ('The {func} {obj_type} is deprecated and may '
'be removed in a future version.')
if alternative:
altmessage = '\n Use {} instead.'.format(alternative)
message = ((message.format(**{
'func': name,
'name': name,
'alternative': alternative,
'obj_type': obj_type_name})) +
altmessage)
if isinstance(obj, type):
return deprecate_class(obj, message)
else:
return deprecate_function(obj, message)
if type(message) is type(deprecate):
return deprecate(message)
return deprecate
def deprecated_attribute(name, since, message=None, alternative=None,
pending=False):
"""
Used to mark a public attribute as deprecated. This creates a
property that will warn when the given attribute name is accessed.
To prevent the warning (i.e. for internal code), use the private
name for the attribute by prepending an underscore
(i.e. ``self._name``).
Parameters
----------
name : str
The name of the deprecated attribute.
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier ``name`` may be used for the name of the attribute,
and ``alternative`` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function.
alternative : str, optional
An alternative attribute that the user may use in place of the
deprecated attribute. The deprecation warning will tell the
user about this alternative if provided.
pending : bool, optional
If True, uses an AstropyPendingDeprecationWarning instead of an
AstropyDeprecationWarning.
Examples
--------
::
class MyClass:
# Mark the old_name as deprecated
old_name = misc.deprecated_attribute('old_name', '0.1')
def method(self):
self._old_name = 42
"""
private_name = '_' + name
@deprecated(since, name=name, obj_type='attribute')
def get(self):
return getattr(self, private_name)
@deprecated(since, name=name, obj_type='attribute')
def set(self, val):
setattr(self, private_name, val)
@deprecated(since, name=name, obj_type='attribute')
def delete(self):
delattr(self, private_name)
return property(get, set, delete)
def deprecated_renamed_argument(old_name, new_name, since,
arg_in_kwargs=False, relax=False,
pending=False):
"""Deprecate a _renamed_ function argument.
The decorator assumes that the argument with the ``old_name`` was removed
from the function signature and the ``new_name`` replaced it at the
**same position** in the signature. If the ``old_name`` argument is
given when calling the decorated function the decorator will catch it and
issue a deprecation warning and pass it on as ``new_name`` argument.
Parameters
----------
old_name : str or list/tuple thereof
The old name of the argument.
new_name : str or list/tuple thereof
The new name of the argument.
since : str or number or list/tuple thereof
The release at which the old argument became deprecated.
arg_in_kwargs : bool or list/tuple thereof, optional
If the argument is not a named argument (for example it
was meant to be consumed by ``**kwargs``) set this to
``True``. Otherwise the decorator will throw an Exception
if the ``new_name`` cannot be found in the signature of
the decorated function.
Default is ``False``.
relax : bool or list/tuple thereof, optional
If ``False`` a ``TypeError`` is raised if both ``new_name`` and
``old_name`` are given. If ``True`` the value for ``new_name`` is used
and a Warning is issued.
Default is ``False``.
pending : bool or list/tuple thereof, optional
If ``True`` this will hide the deprecation warning and ignore the
corresponding ``relax`` parameter value.
Default is ``False``.
Raises
------
TypeError
If the new argument name cannot be found in the function
signature and arg_in_kwargs was False or if it is used to
deprecate the name of the ``*args``-, ``**kwargs``-like arguments.
At runtime such an Error is raised if both the new_name
and old_name were specified when calling the function and
"relax=False".
Notes
-----
The decorator should be applied to a function where the **name**
of an argument was changed but it applies the same logic.
.. warning::
If ``old_name`` is a list or tuple the ``new_name`` and ``since`` must
also be a list or tuple with the same number of entries. ``relax`` and
``arg_in_kwarg`` can be a single bool (applied to all) or also a
list/tuple with the same number of entries like ``new_name``, etc.
Examples
--------
The deprecation warnings are not shown in the following examples.
To deprecate a positional or keyword argument::
>>> from astropy.utils.decorators import deprecated_renamed_argument
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0')
... def test(sigma):
... return sigma
>>> test(2)
2
>>> test(sigma=2)
2
>>> test(sig=2)
2
To deprecate an argument caught inside ``**kwargs``, the
``arg_in_kwargs`` option has to be set::
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0',
... arg_in_kwargs=True)
... def test(**kwargs):
... return kwargs['sigma']
>>> test(sigma=2)
2
>>> test(sig=2)
2
By default providing the new and old keyword will lead to an Exception. If
a Warning is desired set the ``relax`` argument::
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0', relax=True)
... def test(sigma):
... return sigma
>>> test(sig=2)
2
It is also possible to replace multiple arguments. The ``old_name``,
``new_name`` and ``since`` have to be `tuple` or `list` and contain the
same number of entries::
>>> @deprecated_renamed_argument(['a', 'b'], ['alpha', 'beta'],
... ['1.0', 1.2])
... def test(alpha, beta):
... return alpha, beta
>>> test(a=2, b=3)
(2, 3)
In this case ``arg_in_kwargs`` and ``relax`` can be a single value (which
is applied to all renamed arguments) or must also be a `tuple` or `list`
with values for each of the arguments.
"""
cls_iter = (list, tuple)
if isinstance(old_name, cls_iter):
n = len(old_name)
# Assume that new_name and since are correct (tuple/list with the
# appropriate length) in the spirit of the "consenting adults". But the
# optional parameters may not be set, so if these are not iterables
# wrap them.
if not isinstance(arg_in_kwargs, cls_iter):
arg_in_kwargs = [arg_in_kwargs] * n
if not isinstance(relax, cls_iter):
relax = [relax] * n
if not isinstance(pending, cls_iter):
pending = [pending] * n
else:
# To allow a uniform approach later on, wrap all arguments in lists.
n = 1
old_name = [old_name]
new_name = [new_name]
since = [since]
arg_in_kwargs = [arg_in_kwargs]
relax = [relax]
pending = [pending]
def decorator(function):
# The named arguments of the function.
arguments = signature(function).parameters
keys = list(arguments.keys())
position = [None] * n
for i in range(n):
# Determine the position of the argument.
if new_name[i] in arguments:
param = arguments[new_name[i]]
# There are several possibilities now:
# 1.) Positional or keyword argument:
if param.kind == param.POSITIONAL_OR_KEYWORD:
position[i] = keys.index(new_name[i])
# 2.) Keyword only argument:
elif param.kind == param.KEYWORD_ONLY:
# These cannot be specified by position.
position[i] = None
# 3.) positional-only argument, varargs, varkwargs or some
# unknown type:
else:
raise TypeError('cannot replace argument "{0}" of kind '
'{1!r}.'.format(new_name[i], param.kind))
# In case the argument is not found in the list of arguments
# the only remaining possibility is that it should be caught
# by some kind of **kwargs argument.
# This case has to be explicitly specified, otherwise throw
# an exception!
elif arg_in_kwargs[i]:
position[i] = None
else:
raise TypeError('"{}" was not specified in the function '
'signature. If it was meant to be part of '
'"**kwargs" then set "arg_in_kwargs" to "True"'
'.'.format(new_name[i]))
@functools.wraps(function)
def wrapper(*args, **kwargs):
for i in range(n):
# The only way to have oldkeyword inside the function is
# that it is passed as kwarg because the oldkeyword
# parameter was renamed to newkeyword.
if old_name[i] in kwargs:
value = kwargs.pop(old_name[i])
# Display the deprecation warning only when it's not
# pending.
if not pending[i]:
warnings.warn(
'"{0}" was deprecated in version {1} '
'and will be removed in a future version. '
'Use argument "{2}" instead.'
''.format(old_name[i], since[i], new_name[i]),
AstropyDeprecationWarning, stacklevel=2)
# Check if the newkeyword was given as well.
newarg_in_args = (position[i] is not None and
len(args) > position[i])
newarg_in_kwargs = new_name[i] in kwargs
if newarg_in_args or newarg_in_kwargs:
if not pending[i]:
# If both are given print a Warning if relax is
# True or raise an Exception if relax is False.
if relax[i]:
warnings.warn(
'"{0}" and "{1}" keywords were set. '
'Using the value of "{1}".'
''.format(old_name[i], new_name[i]),
AstropyUserWarning)
else:
raise TypeError(
'cannot specify both "{}" and "{}"'
'.'.format(old_name[i], new_name[i]))
else:
# If the new argument isn't specified just pass the old
# one with the name of the new argument to the function
kwargs[new_name[i]] = value
return function(*args, **kwargs)
return wrapper
return decorator
# TODO: This can still be made to work for setters by implementing an
# accompanying metaclass that supports it; we just don't need that right this
# second
class classproperty(property):
"""
Similar to `property`, but allows class-level properties. That is,
a property whose getter is like a `classmethod`.
The wrapped method may explicitly use the `classmethod` decorator (which
must come before this decorator), or the `classmethod` may be omitted
(it is implicit through use of this decorator).
.. note::
classproperty only works for *read-only* properties. It does not
currently allow writeable/deleteable properties, due to subtleties of how
Python descriptors work. In order to implement such properties on a class
a metaclass for that class must be implemented.
Parameters
----------
fget : callable
The function that computes the value of this property (in particular,
the function when this is used as a decorator) a la `property`.
doc : str, optional
The docstring for the property--by default inherited from the getter
function.
lazy : bool, optional
If True, caches the value returned by the first call to the getter
function, so that it is only called once (used for lazy evaluation
of an attribute). This is analogous to `lazyproperty`. The ``lazy``
argument can also be used when `classproperty` is used as a decorator
(see the third example below). When used in the decorator syntax this
*must* be passed in as a keyword argument.
Examples
--------
::
>>> class Foo:
... _bar_internal = 1
... @classproperty
... def bar(cls):
... return cls._bar_internal + 1
...
>>> Foo.bar
2
>>> foo_instance = Foo()
>>> foo_instance.bar
2
>>> foo_instance._bar_internal = 2
>>> foo_instance.bar # Ignores instance attributes
2
As previously noted, a `classproperty` is limited to implementing
read-only attributes::
>>> class Foo:
... _bar_internal = 1
... @classproperty
... def bar(cls):
... return cls._bar_internal
... @bar.setter
... def bar(cls, value):
... cls._bar_internal = value
...
Traceback (most recent call last):
...
NotImplementedError: classproperty can only be read-only; use a
metaclass to implement modifiable class-level properties
When the ``lazy`` option is used, the getter is only called once::
>>> class Foo:
... @classproperty(lazy=True)
... def bar(cls):
... print("Performing complicated calculation")
... return 1
...
>>> Foo.bar
Performing complicated calculation
1
>>> Foo.bar
1
If a subclass inherits a lazy `classproperty` the property is still
re-evaluated for the subclass::
>>> class FooSub(Foo):
... pass
...
>>> FooSub.bar
Performing complicated calculation
1
>>> FooSub.bar
1
"""
def __new__(cls, fget=None, doc=None, lazy=False):
if fget is None:
# Being used as a decorator--return a wrapper that implements
# decorator syntax
def wrapper(func):
return cls(func, lazy=lazy)
return wrapper
return super().__new__(cls)
def __init__(self, fget, doc=None, lazy=False):
self._lazy = lazy
if lazy:
self._cache = {}
fget = self._wrap_fget(fget)
super().__init__(fget=fget, doc=doc)
# There is a buglet in Python where self.__doc__ doesn't
# get set properly on instances of property subclasses if
# the doc argument was used rather than taking the docstring
# from fget
# Related Python issue: https://bugs.python.org/issue24766
if doc is not None:
self.__doc__ = doc
def __get__(self, obj, objtype):
if self._lazy and objtype in self._cache:
return self._cache[objtype]
# The base property.__get__ will just return self here;
# instead we pass objtype through to the original wrapped
# function (which takes the class as its sole argument)
val = self.fget.__wrapped__(objtype)
if self._lazy:
self._cache[objtype] = val
return val
def getter(self, fget):
return super().getter(self._wrap_fget(fget))
def setter(self, fset):
raise NotImplementedError(
"classproperty can only be read-only; use a metaclass to "
"implement modifiable class-level properties")
def deleter(self, fdel):
raise NotImplementedError(
"classproperty can only be read-only; use a metaclass to "
"implement modifiable class-level properties")
@staticmethod
def _wrap_fget(orig_fget):
if isinstance(orig_fget, classmethod):
orig_fget = orig_fget.__func__
# Using stock functools.wraps instead of the fancier version
# found later in this module, which is overkill for this purpose
@functools.wraps(orig_fget)
def fget(obj):
return orig_fget(obj.__class__)
return fget
class lazyproperty(property):
"""
Works similarly to property(), but computes the value only once.
This essentially memoizes the value of the property by storing the result
of its computation in the ``__dict__`` of the object instance. This is
useful for computing the value of some property that should otherwise be
invariant. For example::
>>> class LazyTest:
... @lazyproperty
... def complicated_property(self):
... print('Computing the value for complicated_property...')
... return 42
...
>>> lt = LazyTest()
>>> lt.complicated_property
Computing the value for complicated_property...
42
>>> lt.complicated_property
42
As the example shows, the second time ``complicated_property`` is accessed,
the ``print`` statement is not executed. Only the return value from the
first access of ``complicated_property`` is returned.
By default, a setter and deleter are used which simply overwrite and
delete, respectively, the value stored in ``__dict__``. Any user-specified
setter or deleter is executed before executing these default actions.
The one exception is that the default setter is not run if the user setter
already sets the new value in ``__dict__`` and returns that value and the
returned value is not ``None``.
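For example, a custom setter can take over storage to clamp the value
(a sketch; the class and the bound of 10 are only illustrative)::

    class Bounded:
        @lazyproperty
        def value(self):
            return 0

        @value.setter
        def value(self, val):
            # Store the clamped value ourselves and return it so the
            # default setter does not overwrite it afterwards.
            clamped = min(val, 10)
            self.__dict__['value'] = clamped
            return clamped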
Adapted from the recipe at
http://code.activestate.com/recipes/363602-lazy-property-evaluation
"""
def __init__(self, fget, fset=None, fdel=None, doc=None):
super().__init__(fget, fset, fdel, doc)
self._key = self.fget.__name__
def __get__(self, obj, owner=None):
try:
val = obj.__dict__.get(self._key, _NotFound)
if val is not _NotFound:
return val
else:
val = self.fget(obj)
obj.__dict__[self._key] = val
return val
except AttributeError:
if obj is None:
return self
raise
def __set__(self, obj, val):
obj_dict = obj.__dict__
if self.fset:
ret = self.fset(obj, val)
if ret is not None and obj_dict.get(self._key) is ret:
# By returning the value set the setter signals that it took
# over setting the value in obj.__dict__; this mechanism allows
# it to override the input value
return
obj_dict[self._key] = val
def __delete__(self, obj):
if self.fdel:
self.fdel(obj)
if self._key in obj.__dict__:
del obj.__dict__[self._key]
class sharedmethod(classmethod):
"""
This is a method decorator that allows both an instancemethod and a
`classmethod` to share the same name.
When using `sharedmethod` on a method defined in a class's body, it
may be called on an instance, or on a class. In the former case it
behaves like a normal instance method (a reference to the instance is
automatically passed as the first ``self`` argument of the method)::
>>> class Example:
... @sharedmethod
... def identify(self, *args):
... print('self was', self)
... print('additional args were', args)
...
>>> ex = Example()
>>> ex.identify(1, 2)
self was <astropy.utils.decorators.Example object at 0x...>
additional args were (1, 2)
In the latter case, when the `sharedmethod` is called directly from a
class, it behaves like a `classmethod`::
>>> Example.identify(3, 4)
self was <class 'astropy.utils.decorators.Example'>
additional args were (3, 4)
This also supports a more advanced usage, where the `classmethod`
implementation can be written separately. If the class's *metaclass*
has a method of the same name as the `sharedmethod`, the version on
the metaclass is delegated to::
>>> class ExampleMeta(type):
... def identify(self):
... print('this implements the {0}.identify '
... 'classmethod'.format(self.__name__))
...
>>> class Example(metaclass=ExampleMeta):
... @sharedmethod
... def identify(self):
... print('this implements the instancemethod')
...
>>> Example().identify()
this implements the instancemethod
>>> Example.identify()
this implements the Example.identify classmethod
"""
def __get__(self, obj, objtype=None):
if obj is None:
mcls = type(objtype)
clsmeth = getattr(mcls, self.__func__.__name__, None)
if callable(clsmeth):
func = clsmeth
else:
func = self.__func__
return self._make_method(func, objtype)
else:
return self._make_method(self.__func__, obj)
@staticmethod
def _make_method(func, instance):
return types.MethodType(func, instance)
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES, exclude_args=()):
"""
An alternative to `functools.wraps` which also preserves the original
function's call signature by way of
`~astropy.utils.codegen.make_function_with_signature`.
This also adds an optional ``exclude_args`` argument. If given it should
be a sequence of argument names that should not be copied from the wrapped
function (either positional or keyword arguments).
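A minimal usage sketch (the function names are illustrative)::

    def original(a, b=1):
        return a + b

    @wraps(original)
    def wrapper(*args, **kwargs):
        # Extra work could happen here; the decorated function still
        # exposes original's signature ``(a, b=1)``.
        return original(*args, **kwargs)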
The documentation for the original `functools.wraps` follows:
"""
wrapped_args = _get_function_args(wrapped, exclude_args=exclude_args)
def wrapper(func):
if '__name__' in assigned:
name = wrapped.__name__
else:
name = func.__name__
func = make_function_with_signature(func, name=name, **wrapped_args)
func = functools.update_wrapper(func, wrapped, assigned=assigned,
updated=updated)
return func
return wrapper
# Both __doc__ attributes are either str or None, so a simple None check
# on each suffices.
if wraps.__doc__ is not None and functools.wraps.__doc__ is not None:
    wraps.__doc__ += functools.wraps.__doc__
def _get_function_args_internal(func):
"""
Utility function for `wraps`.
Reads the argspec for the given function and converts it to arguments
for `make_function_with_signature`.
"""
argspec = inspect.getfullargspec(func)
if argspec.defaults:
args = argspec.args[:-len(argspec.defaults)]
kwargs = zip(argspec.args[len(args):], argspec.defaults)
else:
args = argspec.args
kwargs = []
if argspec.kwonlyargs:
kwargs.extend((argname, argspec.kwonlydefaults[argname])
for argname in argspec.kwonlyargs)
return {'args': args, 'kwargs': kwargs, 'varargs': argspec.varargs,
'varkwargs': argspec.varkw}
def _get_function_args(func, exclude_args=()):
all_args = _get_function_args_internal(func)
if exclude_args:
exclude_args = set(exclude_args)
for arg_type in ('args', 'kwargs'):
all_args[arg_type] = [arg for arg in all_args[arg_type]
if arg not in exclude_args]
for arg_type in ('varargs', 'varkwargs'):
if all_args[arg_type] in exclude_args:
all_args[arg_type] = None
return all_args
def format_doc(docstring, *args, **kwargs):
"""
Replaces the docstring of the decorated object and then formats it.
The formatting works like :meth:`str.format` and if the decorated object
already has a docstring this docstring can be included in the new
documentation if you use the ``{__doc__}`` placeholder.
Its primary use is for reusing a *long* docstring in multiple functions
when it is the same or only slightly different between them.
Parameters
----------
docstring : str or object or None
The docstring that will replace the docstring of the decorated
object. If it is an object like a function or class it will
take the docstring of this object. If it is a string it will use the
string itself. One special case is if the string is ``None`` then
it will use the decorated functions docstring and formats it.
args :
passed to :meth:`str.format`.
kwargs :
passed to :meth:`str.format`. If the function has a (not empty)
docstring the original docstring is added to the kwargs with the
keyword ``'__doc__'``.
Raises
------
ValueError
If the ``docstring`` (or interpreted docstring if it was ``None``
or not a string) is empty.
IndexError, KeyError
If a placeholder in the (interpreted) ``docstring`` was not filled. see
:meth:`str.format` for more information.
Notes
-----
Using this decorator allows, for example Sphinx, to parse the
correct docstring.
Examples
--------
Replacing the current docstring is very easy::
>>> from astropy.utils.decorators import format_doc
>>> @format_doc('''Perform num1 + num2''')
... def add(num1, num2):
... return num1+num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform num1 + num2
sometimes instead of replacing you only want to add to it::
>>> doc = '''
... {__doc__}
... Parameters
... ----------
... num1, num2 : Numbers
... Returns
... -------
... result: Number
... '''
>>> @format_doc(doc)
... def add(num1, num2):
... '''Perform addition.'''
... return num1+num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
in case one might want to format it further::
>>> doc = '''
... Perform {0}.
... Parameters
... ----------
... num1, num2 : Numbers
... Returns
... -------
... result: Number
... result of num1 {op} num2
... {__doc__}
... '''
>>> @format_doc(doc, 'addition', op='+')
... def add(num1, num2):
... return num1+num2
...
>>> @format_doc(doc, 'subtraction', op='-')
... def subtract(num1, num2):
... '''Notes: This one has additional notes.'''
... return num1-num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
>>> help(subtract) # doctest: +SKIP
Help on function subtract in module __main__:
<BLANKLINE>
subtract(num1, num2)
Perform subtraction.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 - num2
Notes : This one has additional notes.
These methods can be combined, and even taking the docstring from another
object is possible via its docstring attribute. You just have to specify the
object::
>>> @format_doc(add)
... def another_add(num1, num2):
... return num1 + num2
...
>>> help(another_add) # doctest: +SKIP
Help on function another_add in module __main__:
<BLANKLINE>
another_add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
But be aware that this decorator *only* formats the given docstring not
the strings passed as ``args`` or ``kwargs`` (not even the original
docstring)::
>>> @format_doc(doc, 'addition', op='+')
... def yet_another_add(num1, num2):
... '''This one is good for {0}.'''
... return num1 + num2
...
>>> help(yet_another_add) # doctest: +SKIP
Help on function yet_another_add in module __main__:
<BLANKLINE>
yet_another_add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
This one is good for {0}.
To work around it you could specify the docstring to be ``None``::
>>> @format_doc(None, 'addition')
... def last_add_i_swear(num1, num2):
... '''This one is good for {0}.'''
... return num1 + num2
...
>>> help(last_add_i_swear) # doctest: +SKIP
Help on function last_add_i_swear in module __main__:
<BLANKLINE>
last_add_i_swear(num1, num2)
This one is good for addition.
Using ``None`` as the docstring allows applying the decorator twice
to an object: first to parse the new docstring and then to parse the
original docstring or the ``args`` and ``kwargs``.
"""
def set_docstring(obj):
if docstring is None:
# None means: use the objects __doc__
doc = obj.__doc__
# Delete documentation in this case so we don't end up with
# awkwardly self-inserted docs.
obj.__doc__ = None
elif isinstance(docstring, str):
# String: use the string that was given
doc = docstring
else:
# Something else: Use the __doc__ of this
doc = docstring.__doc__
if not doc:
# In case the docstring is empty it's probably not what was wanted.
raise ValueError('docstring must be a string or an object '
                 'with a non-empty docstring.')
# If the original has a not-empty docstring append it to the format
# kwargs.
kwargs['__doc__'] = obj.__doc__ or ''
obj.__doc__ = doc.format(*args, **kwargs)
return obj
return set_docstring
|
4ea017b9ae5762e17f8819b456b17faa5cc37e5c4d011f76bb2729191407ac25 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions related to Python runtime introspection."""
import inspect
import re
import types
import importlib
from distutils.version import LooseVersion
__all__ = ['resolve_name', 'minversion', 'find_current_module',
'isinstancemethod']
__doctest_skip__ = ['find_current_module']
def resolve_name(name, *additional_parts):
"""Resolve a name like ``module.object`` to an object and return it.
This ends up working like ``from module import object`` but is easier
to deal with than the `__import__` builtin and supports digging into
submodules.
Parameters
----------
name : `str`
A dotted path to a Python object--that is, the name of a function,
class, or other object in a module with the full path to that module,
including parent modules, separated by dots. Also known as the fully
qualified name of the object.
additional_parts : iterable, optional
If more than one positional argument is given, those arguments are
automatically dotted together with ``name``.
Examples
--------
>>> resolve_name('astropy.utils.introspection.resolve_name')
<function resolve_name at 0x...>
>>> resolve_name('astropy', 'utils', 'introspection', 'resolve_name')
<function resolve_name at 0x...>
Raises
------
`ImportError`
If the module or named object is not found.
"""
additional_parts = '.'.join(additional_parts)
if additional_parts:
name = name + '.' + additional_parts
parts = name.split('.')
if len(parts) == 1:
# No dots in the name--just a straight up module import
cursor = 1
fromlist = []
else:
cursor = len(parts) - 1
fromlist = [parts[-1]]
module_name = parts[:cursor]
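# Import the longest importable prefix of the dotted name: try the
# full path first and, on ImportError, peel off one trailing component
# at a time. Anything left over after the imported prefix is resolved
# via getattr below.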
while cursor > 0:
try:
ret = __import__(str('.'.join(module_name)), fromlist=fromlist)
break
except ImportError:
if cursor == 0:
raise
cursor -= 1
module_name = parts[:cursor]
fromlist = [parts[cursor]]
ret = ''
for part in parts[cursor:]:
try:
ret = getattr(ret, part)
except AttributeError:
raise ImportError(name)
return ret
def minversion(module, version, inclusive=True, version_path='__version__'):
"""
Returns `True` if the specified Python module satisfies a minimum version
requirement, and `False` if not.
Parameters
----------
module : module or `str`
An imported module of which to check the version, or the name of
that module (in which case an import of that module is attempted--
if this fails `False` is returned).
version : `str`
The version as a string that this module must have at a minimum (e.g.
``'0.12'``).
inclusive : `bool`
If `True` (default), the check is inclusive (i.e. ``>=``); otherwise
the module version must be strictly greater than ``version``.
version_path : `str`
A dotted attribute path to follow in the module for the version.
Defaults to just ``'__version__'``, which should work for most Python
modules.
Examples
--------
>>> import astropy
>>> minversion(astropy, '0.4.4')
True
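The module may also be given by name; this sketch assumes numpy is
installed (it is a dependency of this package)::
>>> minversion('numpy', '1.0')
True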
"""
if isinstance(module, types.ModuleType):
module_name = module.__name__
elif isinstance(module, str):
module_name = module
try:
module = resolve_name(module_name)
except ImportError:
return False
else:
raise ValueError('module argument must be an actual imported '
'module, or the import name of the module; '
'got {0!r}'.format(module))
if '.' not in version_path:
have_version = getattr(module, version_path)
else:
have_version = resolve_name(module.__name__, version_path)
# LooseVersion raises a TypeError when strings like dev, rc1 are part
# of the version number. Match the dotted numbers only. Regex taken
# from PEP440, https://www.python.org/dev/peps/pep-0440/, Appendix B
expr = '^([1-9]\\d*!)?(0|[1-9]\\d*)(\\.(0|[1-9]\\d*))*'
m = re.match(expr, version)
if m:
version = m.group(0)
if inclusive:
return LooseVersion(have_version) >= LooseVersion(version)
else:
return LooseVersion(have_version) > LooseVersion(version)
def find_current_module(depth=1, finddiff=False):
"""
Determines the module/package from which this function is called.
This function has two modes, determined by the ``finddiff`` option. It
will either simply go the requested number of frames up the call
stack (if ``finddiff`` is False), or it will go up the call stack until
it reaches a module that is *not* in a specified set.
Parameters
----------
depth : int
Specifies how far back to go in the call stack (0-indexed, so that
passing in 0 gives back `astropy.utils.introspection`, the module this
function is defined in).
finddiff : bool or list
If False, the returned ``mod`` will just be ``depth`` frames up from
the current frame. Otherwise, the function will start at a frame
``depth`` up from current, and continue up the call stack to the
first module that is *different* from those in the provided list.
In this case, ``finddiff`` can be a list of modules or module
names. Alternatively, it can be `True`, in which case the module
``depth`` call stack frames up is used as the module that the
returned module must be different from.
Returns
-------
mod : module or None
The module object or None if the package cannot be found. The name of
the module is available as the ``__name__`` attribute of the returned
object (if it isn't None).
Raises
------
ValueError
If ``finddiff`` is a list with an invalid entry.
Examples
--------
The examples below assume that there are two modules in a package named
``pkg``. ``mod1.py``::
def find1():
from astropy.utils import find_current_module
print(find_current_module(1).__name__)
def find2():
from astropy.utils import find_current_module
cmod = find_current_module(2)
if cmod is None:
print('None')
else:
print(cmod.__name__)
def find_diff():
from astropy.utils import find_current_module
print(find_current_module(0, True).__name__)
``mod2.py``::
def find():
from .mod1 import find2
find2()
With these modules in place, the following occurs::
>>> from pkg import mod1, mod2
>>> from astropy.utils import find_current_module
>>> mod1.find1()
pkg.mod1
>>> mod1.find2()
None
>>> mod2.find()
pkg.mod2
>>> find_current_module(0)
<module 'astropy.utils.introspection' from 'astropy/utils/introspection.py'>
>>> mod1.find_diff()
pkg.mod1
"""
frm = inspect.currentframe()
for i in range(depth):
frm = frm.f_back
if frm is None:
return None
if finddiff:
currmod = inspect.getmodule(frm)
if finddiff is True:
diffmods = [currmod]
else:
diffmods = []
for fd in finddiff:
if inspect.ismodule(fd):
diffmods.append(fd)
elif isinstance(fd, str):
diffmods.append(importlib.import_module(fd))
elif fd is True:
diffmods.append(currmod)
else:
raise ValueError('invalid entry in finddiff')
while frm:
frmb = frm.f_back
modb = inspect.getmodule(frmb)
if modb not in diffmods:
return modb
frm = frmb
else:
return inspect.getmodule(frm)
def find_mod_objs(modname, onlylocals=False):
""" Returns all the public attributes of a module referenced by name.
.. note::
The returned list does *not* include subpackages or modules of
``modname``, nor does it include private attributes (those that
begin with '_' or are not in `__all__`).
Parameters
----------
modname : str
The name of the module to search.
onlylocals : bool or list of str
If `True`, only attributes that are either members of ``modname`` OR
one of its modules or subpackages will be included. If it is a list
of strings, those specify the possible packages that will be
considered "local".
Returns
-------
localnames : list of str
A list of the names of the attributes as they are named in the
module ``modname``.
fqnames : list of str
A list of the full qualified names of the attributes (e.g.,
``astropy.utils.introspection.find_mod_objs``). For attributes that are
simple variables, this is based on the local name, but for functions or
classes it can be different if they are actually defined elsewhere and
just referenced in ``modname``.
objs : list of objects
A list of the actual attributes themselves (in the same order as
``localnames`` and ``fqnames``).
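Examples
--------
A small self-contained illustration (the names come from this module's
``__all__``)::
>>> lnames, fqns, objs = find_mod_objs('astropy.utils.introspection')
>>> 'resolve_name' in lnames
True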
"""
mod = resolve_name(modname)
if hasattr(mod, '__all__'):
pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__]
else:
pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != '_']
# filter out modules and pull the names and objs out
ismodule = inspect.ismodule
localnames = [k for k, v in pkgitems if not ismodule(v)]
objs = [v for k, v in pkgitems if not ismodule(v)]
# fully qualified names can be determined from the object's module
fqnames = []
for obj, lnm in zip(objs, localnames):
if hasattr(obj, '__module__') and hasattr(obj, '__name__'):
fqnames.append(obj.__module__ + '.' + obj.__name__)
else:
fqnames.append(modname + '.' + lnm)
if onlylocals:
if onlylocals is True:
onlylocals = [modname]
valids = [any(fqn.startswith(nm) for nm in onlylocals) for fqn in fqnames]
localnames = [e for i, e in enumerate(localnames) if valids[i]]
fqnames = [e for i, e in enumerate(fqnames) if valids[i]]
objs = [e for i, e in enumerate(objs) if valids[i]]
return localnames, fqnames, objs
# Note: I would have preferred to call this is_instancemethod, but this naming is
# for consistency with other functions in the `inspect` module
def isinstancemethod(cls, obj):
"""
Returns `True` if the given object is an instance method of the class
it is defined on (as opposed to a `staticmethod` or a `classmethod`).
This requires both the class the object is a member of and the
object itself in order to make this determination.
Parameters
----------
cls : `type`
The class on which this method was defined.
obj : `object`
A member of the provided class (the membership is not checked directly,
but this function will always return `False` if the given object is not
a member of the given class).
Examples
--------
>>> class MetaClass(type):
... def a_classmethod(cls): pass
...
>>> class MyClass(metaclass=MetaClass):
... def an_instancemethod(self): pass
...
... @classmethod
... def another_classmethod(cls): pass
...
... @staticmethod
... def a_staticmethod(): pass
...
>>> isinstancemethod(MyClass, MyClass.a_classmethod)
False
>>> isinstancemethod(MyClass, MyClass.another_classmethod)
False
>>> isinstancemethod(MyClass, MyClass.a_staticmethod)
False
>>> isinstancemethod(MyClass, MyClass.an_instancemethod)
True
"""
return _isinstancemethod(cls, obj)
def _isinstancemethod(cls, obj):
if not isinstance(obj, types.FunctionType):
return False
# Unfortunately it seems the easiest way to get to the original
# staticmethod object is to look in the class's __dict__, though we
# also need to look up the MRO in case the method is not in the given
# class's dict
name = obj.__name__
for basecls in cls.mro(): # This includes cls
if name in basecls.__dict__:
return not isinstance(basecls.__dict__[name], staticmethod)
# This shouldn't happen, though this is the most sensible response if
# it does.
raise AttributeError(name)
|
03e5458bcf3557aa5092819d8f54a452520a85f91c2f6a6548f831ddd6ea890f | import difflib
import functools
import sys
import numbers
import numpy as np
from .misc import indent
__all__ = ['fixed_width_indent', 'diff_values', 'report_diff_values',
'where_not_allclose']
# Smaller default shift-width for indent
fixed_width_indent = functools.partial(indent, width=2)
def diff_values(a, b, rtol=0.0, atol=0.0):
"""
Diff two scalar values. If both values are floats, they are compared to
within the given absolute and relative tolerance.
Parameters
----------
a, b : int, float, str
Scalar values to compare.
rtol, atol : float
Relative and absolute tolerances as accepted by
:func:`numpy.allclose`.
Returns
-------
is_different : bool
`True` if they are different, else `False`.
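Examples
--------
A minimal illustration (values chosen arbitrarily)::
>>> diff_values(1.0, 1.0 + 1e-9, rtol=1e-6)
False
>>> diff_values('a', 'b')
True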
"""
if isinstance(a, float) and isinstance(b, float):
if np.isnan(a) and np.isnan(b):
return False
return not np.allclose(a, b, rtol=rtol, atol=atol)
else:
return a != b
def report_diff_values(a, b, fileobj=sys.stdout, indent_width=0):
"""
Write a diff report between two values to the specified file-like object.
Parameters
----------
a, b
Values to compare. Anything that can be turned into strings
and compared using :py:mod:`difflib` should work.
fileobj : obj
File-like object to write to.
The default is ``sys.stdout``, which writes to the terminal.
indent_width : int
Character column(s) to indent.
Returns
-------
identical : bool
`True` if no diff, else `False`.
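Examples
--------
A sketch of typical use; the exact diff formatting depends on the
``indent`` helper, so the output shown here is illustrative only::
>>> import sys
>>> report_diff_values(1.0, 1.5, fileobj=sys.stdout)  # doctest: +SKIP
a> 1.0
b> 1.5
False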
"""
if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
if a.shape != b.shape:
fileobj.write(
fixed_width_indent(' Different array shapes:\n',
indent_width))
report_diff_values(str(a.shape), str(b.shape), fileobj=fileobj,
indent_width=indent_width + 1)
return False
diff_indices = np.transpose(np.where(a != b))
num_diffs = diff_indices.shape[0]
for idx in diff_indices[:3]:
lidx = idx.tolist()
fileobj.write(
fixed_width_indent(' at {!r}:\n'.format(lidx), indent_width))
report_diff_values(a[tuple(idx)], b[tuple(idx)], fileobj=fileobj,
indent_width=indent_width + 1)
if num_diffs > 3:
fileobj.write(fixed_width_indent(
' ...and at {:d} more indices.\n'.format(num_diffs - 3),
indent_width))
return False
return num_diffs == 0
typea = type(a)
typeb = type(b)
if typea == typeb:
lnpad = ' '
sign_a = 'a>'
sign_b = 'b>'
if isinstance(a, numbers.Number):
a = repr(a)
b = repr(b)
else:
a = str(a)
b = str(b)
else:
padding = max(len(typea.__name__), len(typeb.__name__)) + 3
lnpad = (padding + 1) * ' '
sign_a = ('(' + typea.__name__ + ') ').rjust(padding) + 'a>'
sign_b = ('(' + typeb.__name__ + ') ').rjust(padding) + 'b>'
is_a_str = isinstance(a, str)
is_b_str = isinstance(b, str)
a = (repr(a) if ((is_a_str and not is_b_str) or
(not is_a_str and isinstance(a, numbers.Number)))
else str(a))
b = (repr(b) if ((is_b_str and not is_a_str) or
(not is_b_str and isinstance(b, numbers.Number)))
else str(b))
identical = True
for line in difflib.ndiff(a.splitlines(), b.splitlines()):
if line[0] == '-':
identical = False
line = sign_a + line[1:]
elif line[0] == '+':
identical = False
line = sign_b + line[1:]
else:
line = lnpad + line
fileobj.write(fixed_width_indent(
' {}\n'.format(line.rstrip('\n')), indent_width))
return identical
def where_not_allclose(a, b, rtol=1e-5, atol=1e-8):
"""
A version of :func:`numpy.allclose` that returns the indices
where the two arrays differ, instead of just a boolean value.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol, atol : float
Relative and absolute tolerances as accepted by
:func:`numpy.allclose`.
Returns
-------
idx : tuple of arrays
Indices where the two arrays differ.
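Examples
--------
An illustrative sketch (the exact ``repr`` of the returned index arrays
may vary by platform)::
>>> import numpy as np
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = np.array([1.0, 2.5, 3.0])
>>> where_not_allclose(a, b)  # doctest: +SKIP
(array([1]),)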
"""
# Create fixed mask arrays to handle INF and NaN; currently INF and NaN
# are handled as equivalent
if not np.all(np.isfinite(a)):
a = np.ma.fix_invalid(a).data
if not np.all(np.isfinite(b)):
b = np.ma.fix_invalid(b).data
if atol == 0.0 and rtol == 0.0:
# Use a faster comparison for the most simple (and common) case
return np.where(a != b)
return np.where(np.abs(a - b) > (atol + rtol * np.abs(b)))
|
ad0a2bf34b578caab37e4cb9385437581cbdb173e28723e1320e34611a386a5f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from distutils.core import Extension
from os.path import dirname, join, relpath
ASTROPY_UTILS_ROOT = dirname(__file__)
def get_extensions():
return [
Extension('astropy.utils._compiler',
[relpath(join(ASTROPY_UTILS_ROOT, 'src', 'compiler.c'))])
]
def get_package_data():
# Installs the testing data files
return {
'astropy.utils.tests': [
'data/test_package/*.py',
'data/test_package/data/*.txt',
'data/dataurl/index.html',
'data/dataurl_mirror/index.html',
'data/*.dat',
'data/*.txt',
'data/*.gz',
'data/*.bz2',
'data/*.xz',
'data/.hidden_file.txt',
'data/*.cfg'],
'astropy.utils.iers': [
'data/ReadMe.eopc04_IAU2000',
'data/ReadMe.finals2000A',
'data/eopc04_IAU2000.62-now',
'tests/finals2000A-2016-04-30-test',
'tests/finals2000A-2016-02-30-test',
'tests/iers_a_excerpt']
}
|
b054eada7e188bd00add051852927b597a352b167a1ce95fe8de78fe40e27582 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module contains functions and methods that relate to the DataInfo class
which provides a container for informational attributes as well as summary info
methods.
A DataInfo object is attached to the Quantity, SkyCoord, and Time classes in
astropy. Here it allows those classes to be used in Tables and uniformly carry
table column attributes such as name, format, dtype, meta, and description.
"""
# Note: these functions and classes are tested extensively in astropy table
# tests via their use in providing mixin column info, and in
# astropy/tests/test_info for providing table and column info summary data.
import os
import re
import sys
import weakref
import warnings
from io import StringIO
from copy import deepcopy
from functools import partial
from collections import OrderedDict
from contextlib import contextmanager
import numpy as np
from . import metadata
__all__ = ['data_info_factory', 'dtype_info_name', 'BaseColumnInfo',
'DataInfo', 'MixinInfo', 'ParentDtypeInfo']
# Tuple of filterwarnings kwargs to ignore when calling info
IGNORE_WARNINGS = (dict(category=RuntimeWarning, message='All-NaN|'
'Mean of empty slice|Degrees of freedom <= 0'),)
STRING_TYPE_NAMES = {(False, 'S'): 'str', # not PY3
(False, 'U'): 'unicode',
(True, 'S'): 'bytes', # PY3
(True, 'U'): 'str'}
@contextmanager
def serialize_context_as(context):
"""Set context for serialization.
This will allow downstream code to understand the context in which a column
is being serialized. Objects like Time or SkyCoord will have different
default serialization representations depending on context.
Parameters
----------
context : str
Context name, e.g. 'fits', 'hdf5', 'ecsv', 'yaml'
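Examples
--------
A sketch of intended use; the previous context is restored on exit::
>>> with serialize_context_as('fits'):
... pass # code here sees BaseColumnInfo._serialize_context == 'fits'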
"""
old_context = BaseColumnInfo._serialize_context
BaseColumnInfo._serialize_context = context
yield
BaseColumnInfo._serialize_context = old_context
def dtype_info_name(dtype):
"""Return a human-oriented string name of the ``dtype`` arg.
This can be used by astropy methods that present type information about
a data object.
The output is mostly equivalent to ``dtype.name`` which takes the form
<type_name>[B] where <type_name> is like ``int`` or ``bool`` and [B] is an
optional number of bits which gets included only for numeric types.
For bytes, string and unicode types, the output is shown below, where <N>
is the number of characters. This representation corresponds to the Python
type that matches the dtype::
Numpy S<N> U<N>
Python bytes<N> str<N>
Parameters
----------
dtype : str, np.dtype, type
Input dtype as an object that can be converted via np.dtype()
Returns
-------
dtype_info_name : str
String name of ``dtype``
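Examples
--------
A couple of simple illustrations::
>>> import numpy as np
>>> dtype_info_name(np.dtype('U4'))
'str4'
>>> dtype_info_name(np.int32)
'int32'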
"""
dtype = np.dtype(dtype)
if dtype.kind in ('S', 'U'):
length = re.search(r'(\d+)', dtype.str).group(1)
type_name = STRING_TYPE_NAMES[(True, dtype.kind)]
out = type_name + length
else:
out = dtype.name
return out
def data_info_factory(names, funcs):
"""
Factory to create a function that can be used as an ``option``
for outputting data object summary information.
Examples
--------
>>> import numpy as np
>>> from astropy.utils.data_info import data_info_factory
>>> from astropy.table import Column
>>> c = Column([4., 3., 2., 1.])
>>> mystats = data_info_factory(names=['min', 'median', 'max'],
... funcs=[np.min, np.median, np.max])
>>> c.info(option=mystats)
min = 1.0
median = 2.5
max = 4.0
n_bad = 0
length = 4
Parameters
----------
names : list
List of information attribute names
funcs : list
List of functions that compute the corresponding information attribute
Returns
-------
func : function
Function that can be used as a data info option
"""
def func(dat):
outs = []
for name, func in zip(names, funcs):
try:
if isinstance(func, str):
out = getattr(dat, func)()
else:
out = func(dat)
except Exception:
outs.append('--')
else:
outs.append(str(out))
return OrderedDict(zip(names, outs))
return func
def _get_obj_attrs_map(obj, attrs):
"""
Get the values for object ``attrs`` and return as a dict. This
ignores any attributes that are None. In the context of serializing
the supported core astropy classes this results in more succinct and
less python-specific YAML.
"""
out = {}
for attr in attrs:
val = getattr(obj, attr, None)
if val is not None:
out[attr] = val
return out
def _get_data_attribute(dat, attr=None):
"""
Get a data object attribute for the ``attributes`` info summary method
"""
if attr == 'class':
val = type(dat).__name__
elif attr == 'dtype':
val = dtype_info_name(dat.info.dtype)
elif attr == 'shape':
datshape = dat.shape[1:]
val = datshape if datshape else ''
else:
val = getattr(dat.info, attr)
if val is None:
val = ''
return str(val)
class DataInfo:
"""
Descriptor that data classes use to add an ``info`` attribute for storing
data attributes in a uniform and portable way. Note that it *must* be
called ``info`` so that the DataInfo() object can be stored in the
``instance`` using the ``info`` key. Because owner_cls.x is a descriptor,
Python doesn't use __dict__['x'] normally, and the descriptor can safely
store stuff there. Thanks to http://nbviewer.ipython.org/urls/
gist.github.com/ChrisBeaumont/5758381/raw/descriptor_writeup.ipynb for
this trick that works for non-hashable classes.
Parameters
----------
bound : bool
If True this is a descriptor attribute in a class definition, else it
is a DataInfo() object that is bound to a data object instance. Default is False.
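Examples
--------
A minimal sketch of attaching ``info`` to a class; ``MyData`` is a
hypothetical container class used only for illustration::
>>> class MyData:
... info = DataInfo()
...
>>> d = MyData()
>>> d.info.name = 'x'
>>> d.info.name
'x'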
"""
_stats = ['mean', 'std', 'min', 'max']
attrs_from_parent = set()
attr_names = set(['name', 'unit', 'dtype', 'format', 'description', 'meta'])
_attrs_no_copy = set()
_info_summary_attrs = ('dtype', 'shape', 'unit', 'format', 'description', 'class')
_parent_ref = None
# This specifies the list of object attributes which must be stored in
# order to re-create the object after serialization. This is independent
# of normal `info` attributes like name or description. Subclasses will
# generally either define this statically (QuantityInfo) or dynamically
# (SkyCoordInfo). These attributes may be scalars or arrays. If arrays
# that match the object length they will be serialized as an independent
# column.
_represent_as_dict_attrs = ()
# This specifies attributes which are to be provided to the class
# initializer as ordered args instead of keyword args. This is needed
# for Quantity subclasses where the keyword for data varies (e.g.
# between Quantity and Angle).
_construct_from_dict_args = ()
# This specifies the name of an attribute which is the "primary" data.
# Then when representing as columns
# (table.serialize._represent_mixin_as_column) the output for this
# attribute will be written with the just name of the mixin instead of the
# usual "<name>.<attr>".
_represent_as_dict_primary_data = None
def __init__(self, bound=False):
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
self._attrs = dict((attr, None) for attr in self.attr_names)
@property
def _parent(self):
if self._parent_ref is None:
return None
else:
parent = self._parent_ref()
if parent is not None:
return parent
else:
raise AttributeError("""\
failed to access the "info" attribute on a temporary object.
It looks like you have done something like ``col[3:5].info``, i.e.
you accessed ``info`` from a temporary slice object ``col[3:5]`` that
only exists momentarily. This has failed because the reference to
that temporary object is now lost. Instead force a permanent
reference with ``c = col[3:5]`` followed by ``c.info``.""")
@_parent.setter
def _parent(self, value):
if value is None:
self._parent_ref = None
else:
self._parent_ref = weakref.ref(value)
def __get__(self, instance, owner_cls):
if instance is None:
# This is an unbound descriptor on the class
info = self
info._parent_cls = owner_cls
else:
info = instance.__dict__.get('info')
if info is None:
info = instance.__dict__['info'] = self.__class__(bound=True)
info._parent = instance
return info
def __set__(self, instance, value):
if instance is None:
# This is an unbound descriptor on the class
raise ValueError('cannot set unbound descriptor')
if isinstance(value, DataInfo):
info = instance.__dict__['info'] = self.__class__(bound=True)
for attr in info.attr_names - info.attrs_from_parent - info._attrs_no_copy:
info._attrs[attr] = deepcopy(getattr(value, attr))
else:
raise TypeError('info must be set with a DataInfo instance')
def __getstate__(self):
return self._attrs
def __setstate__(self, state):
self._attrs = state
def __getattr__(self, attr):
if attr.startswith('_'):
return super().__getattribute__(attr)
if attr in self.attrs_from_parent:
return getattr(self._parent, attr)
try:
value = self._attrs[attr]
except KeyError:
super().__getattribute__(attr) # Generate AttributeError
# Weak ref for parent table
if attr == 'parent_table' and callable(value):
value = value()
# Mixins have a default dtype of Object if nothing else was set
if attr == 'dtype' and value is None:
value = np.dtype('O')
return value
def __setattr__(self, attr, value):
propobj = getattr(self.__class__, attr, None)
# If attribute is taken from parent properties and there is not a
# class property (getter/setter) for this attribute then set
# attribute directly in parent.
if attr in self.attrs_from_parent and not isinstance(propobj, property):
setattr(self._parent, attr, value)
return
# Check if there is a property setter and use it if possible.
if isinstance(propobj, property):
if propobj.fset is None:
raise AttributeError("can't set attribute")
propobj.fset(self, value)
return
# Private attr names get directly set
if attr.startswith('_'):
super().__setattr__(attr, value)
return
# Finally this must be an actual data attribute that this class is handling.
if attr not in self.attr_names:
raise AttributeError("attribute must be one of {0}".format(self.attr_names))
if attr == 'parent_table':
value = None if value is None else weakref.ref(value)
self._attrs[attr] = value
def _represent_as_dict(self):
"""Get the values for the parent ``attrs`` and return as a dict."""
return _get_obj_attrs_map(self._parent, self._represent_as_dict_attrs)
def _construct_from_dict(self, map):
args = [map.pop(attr) for attr in self._construct_from_dict_args]
return self._parent_cls(*args, **map)
info_summary_attributes = staticmethod(
data_info_factory(names=_info_summary_attrs,
funcs=[partial(_get_data_attribute, attr=attr)
for attr in _info_summary_attrs]))
# Use numpy's nan-aware reductions (nanmean, nanstd, nanmin, nanmax)
# so that NaN entries are ignored in the summary statistics.
info_summary_stats = staticmethod(
data_info_factory(names=_stats,
funcs=[getattr(np, 'nan' + stat)
for stat in _stats]))
def __call__(self, option='attributes', out=''):
"""
Write summary information about the data object to the ``out`` filehandle.
By default this prints to standard output via sys.stdout.
The ``option`` argument specifies what type of information
to include. This can be a string, a function, or a list of
strings or functions. Built-in options are:
- ``attributes``: data object attributes like ``dtype`` and ``format``
- ``stats``: basic statistics: mean, std, min, and max
If a function is specified then that function will be called with the
data object as its single argument. The function must return an
OrderedDict containing the information attributes.
If a list is provided then the information attributes will be
appended for each of the options, in order.
Examples
--------
>>> from astropy.table import Column
>>> c = Column([1, 2], unit='m', dtype='int32')
>>> c.info()
dtype = int32
unit = m
class = Column
n_bad = 0
length = 2
>>> c.info(['attributes', 'stats'])
dtype = int32
unit = m
class = Column
mean = 1.5
std = 0.5
min = 1
max = 2
n_bad = 0
length = 2
Parameters
----------
option : str, function, list of (str or function)
Info option, defaults to 'attributes'.
out : file-like object, None
Output destination, defaults to sys.stdout. If None then the
OrderedDict with information attributes is returned
Returns
-------
info : OrderedDict if ``out is None`` else None
"""
if out == '':
out = sys.stdout
dat = self._parent
info = OrderedDict()
name = dat.info.name
if name is not None:
info['name'] = name
options = option if isinstance(option, (list, tuple)) else [option]
for option in options:
if isinstance(option, str):
if hasattr(self, 'info_summary_' + option):
option = getattr(self, 'info_summary_' + option)
else:
raise ValueError('option={0} is not an allowed information type'
.format(option))
with warnings.catch_warnings():
for ignore_kwargs in IGNORE_WARNINGS:
warnings.filterwarnings('ignore', **ignore_kwargs)
info.update(option(dat))
if hasattr(dat, 'mask'):
n_bad = np.count_nonzero(dat.mask)
else:
try:
n_bad = np.count_nonzero(np.isinf(dat) | np.isnan(dat))
except Exception:
n_bad = 0
info['n_bad'] = n_bad
try:
info['length'] = len(dat)
except TypeError:
pass
if out is None:
return info
for key, val in info.items():
if val != '':
out.write('{0} = {1}'.format(key, val) + os.linesep)
def __repr__(self):
if self._parent is None:
return super().__repr__()
out = StringIO()
self.__call__(out=out)
return out.getvalue()
class BaseColumnInfo(DataInfo):
"""
Base info class for anything that can be a column in an astropy
Table. There are at least two classes that inherit from this:
ColumnInfo: for native astropy Column / MaskedColumn objects
MixinInfo: for mixin column objects
Note that this class is defined here so that mixins can use it
without importing the table package.
"""
attr_names = DataInfo.attr_names.union(['parent_table', 'indices'])
_attrs_no_copy = set(['parent_table'])
# Context for serialization. This can be set temporarily via
# ``serialize_context_as(context)`` context manager to allow downstream
# code to understand the context in which a column is being serialized.
# Typical values are 'fits', 'hdf5', 'ecsv', 'yaml'. Objects like Time or
# SkyCoord will have different default serialization representations
# depending on context.
_serialize_context = None
def __init__(self, bound=False):
super().__init__(bound=bound)
# If bound to a data object instance then add a _format_funcs dict
# for caching functions for print formatting.
if bound:
self._format_funcs = {}
def iter_str_vals(self):
"""
This is a mixin-safe version of Column.iter_str_vals.
"""
col = self._parent
if self.parent_table is None:
from ..table.column import FORMATTER as formatter
else:
formatter = self.parent_table.formatter
_pformat_col_iter = formatter._pformat_col_iter
for str_val in _pformat_col_iter(col, -1, False, False, {}):
yield str_val
def adjust_indices(self, index, value, col_len):
'''
Adjust info indices after column modification.
Parameters
----------
index : slice, int, list, or ndarray
Element(s) of column to modify. This parameter can
be a single row number, a list of row numbers, an
ndarray of row numbers, a boolean ndarray (a mask),
or a column slice.
value : int, list, or ndarray
New value(s) to insert
col_len : int
Length of the column
'''
if not self.indices:
return
if isinstance(index, slice):
# run through each key in slice
t = index.indices(col_len)
keys = list(range(*t))
elif isinstance(index, np.ndarray) and index.dtype.kind == 'b':
# boolean mask
keys = np.where(index)[0]
else: # single int
keys = [index]
value = np.atleast_1d(value) # turn array(x) into array([x])
if value.size == 1:
# repeat single value
value = list(value) * len(keys)
for key, val in zip(keys, value):
for col_index in self.indices:
col_index.replace(key, self.name, val)
def slice_indices(self, col_slice, item, col_len):
'''
Given a sliced object, modify its indices
to correctly represent the slice.
Parameters
----------
col_slice : Column or mixin
Sliced object
item : slice, list, or ndarray
Slice used to create col_slice
col_len : int
Length of original object
'''
from ..table.sorted_array import SortedArray
if not getattr(self, '_copy_indices', True):
# Necessary because MaskedArray will perform a shallow copy
col_slice.info.indices = []
return col_slice
elif isinstance(item, slice):
col_slice.info.indices = [x[item] for x in self.indices]
elif self.indices:
if isinstance(item, np.ndarray) and item.dtype.kind == 'b':
# boolean mask
item = np.where(item)[0]
# Empirical testing suggests that recreating a BST/RBT index is
# more effective than relabelling when less than ~60% of
# the total number of rows are involved, and is in general
# more effective for SortedArray.
threshold = 0.6
small = len(item) <= threshold * col_len
col_slice.info.indices = []
for index in self.indices:
if small or isinstance(index, SortedArray):
new_index = index.get_slice(col_slice, item)
else:
new_index = deepcopy(index)
new_index.replace_rows(item)
col_slice.info.indices.append(new_index)
return col_slice
@staticmethod
def merge_cols_attributes(cols, metadata_conflicts, name, attrs):
"""
Utility method to merge and validate the attributes ``attrs`` for the
input table columns ``cols``.
Note that ``dtype`` and ``shape`` attributes are handled specially.
These should not be passed in ``attrs`` but will always be in the
returned dict of merged attributes.
Parameters
----------
cols : list
List of input Table column objects
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
attrs : list
List of attribute names to be merged
Returns
-------
attrs : dict of merged attributes
"""
from ..table.np_utils import TableMergeError
def warn_str_func(key, left, right):
out = ("In merged column '{}' the '{}' attribute does not match "
"({} != {}). Using {} for merged output"
.format(name, key, left, right, right))
return out
def getattrs(col):
return {attr: getattr(col.info, attr) for attr in attrs
if getattr(col.info, attr, None) is not None}
out = getattrs(cols[0])
for col in cols[1:]:
out = metadata.merge(out, getattrs(col), metadata_conflicts=metadata_conflicts,
warn_str_func=warn_str_func)
# Output dtype is the superset of all dtypes in the input cols
out['dtype'] = metadata.common_dtype(cols)
# Make sure all input shapes are the same
uniq_shapes = set(col.shape[1:] for col in cols)
if len(uniq_shapes) != 1:
raise TableMergeError('columns have different shapes')
out['shape'] = uniq_shapes.pop()
return out
class MixinInfo(BaseColumnInfo):
def __setattr__(self, attr, value):
# For mixin columns that live within a table, rename the column in the
# table when setting the name attribute. This mirrors the same
# functionality in the BaseColumn class.
if attr == 'name' and self.parent_table is not None:
from ..table.np_utils import fix_column_name
new_name = fix_column_name(value) # Ensure col name is numpy compatible
self.parent_table.columns._rename_column(self.name, new_name)
super().__setattr__(attr, value)
class ParentDtypeInfo(MixinInfo):
"""Mixin that gets info.dtype from parent"""
attrs_from_parent = set(['dtype']) # dtype and unit taken from parent
|
b86c739a6d4311d7e2633c9e062a3bae9bbb9ca7a579bad6aa194b358037d9a3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
""" This module contains helper functions for accessing, downloading, and
caching data files.
"""
import atexit
import contextlib
import fnmatch
import hashlib
import os
import io
import pathlib
import shutil
import socket
import sys
import time
import urllib.request
import urllib.error
import urllib.parse
import shelve
from tempfile import NamedTemporaryFile, gettempdir
from warnings import warn
from .. import config as _config
from ..utils.exceptions import AstropyWarning
from ..utils.introspection import find_current_module, resolve_name
__all__ = [
'Conf', 'conf', 'get_readable_fileobj', 'get_file_contents',
'get_pkg_data_fileobj', 'get_pkg_data_filename',
'get_pkg_data_contents', 'get_pkg_data_fileobjs',
'get_pkg_data_filenames', 'compute_hash', 'clear_download_cache',
'CacheMissingWarning', 'get_free_space_in_dir',
'check_free_space_in_dir', 'download_file',
'download_files_in_parallel', 'is_url_in_cache', 'get_cached_urls']
_dataurls_to_alias = {}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.data`.
"""
dataurl = _config.ConfigItem(
'http://data.astropy.org/',
'Primary URL for astropy remote data site.')
dataurl_mirror = _config.ConfigItem(
'http://www.astropy.org/astropy-data/',
'Mirror URL for astropy remote data site.')
remote_timeout = _config.ConfigItem(
10.,
'Time to wait for remote data queries (in seconds).',
aliases=['astropy.coordinates.name_resolve.name_resolve_timeout'])
compute_hash_block_size = _config.ConfigItem(
2 ** 16, # 64K
'Block size for computing MD5 file hashes.')
download_block_size = _config.ConfigItem(
2 ** 16, # 64K
'Number of bytes of remote data to download per step.')
download_cache_lock_attempts = _config.ConfigItem(
5,
'Number of times to try to get the lock ' +
'while accessing the data cache before giving up.')
delete_temporary_downloads_at_exit = _config.ConfigItem(
True,
'If True, temporary download files created when the cache is '
'inaccessible will be deleted at the end of the python session.')
conf = Conf()
class CacheMissingWarning(AstropyWarning):
"""
This warning indicates the standard cache directory is not accessible, with
the first argument providing the warning message. If args[1] is present, it
is a filename indicating the path to a temporary file that was created to
store a remote data download in the absence of the cache.
"""
def _is_url(string):
"""
Test whether a string is a valid URL
Parameters
----------
string : str
The string to test
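Examples
--------
Simple illustrations with a real URL scheme and a plain local path::
>>> _is_url('http://data.astropy.org/allsky/allsky_rosat.fits')
True
>>> _is_url('local/path/file.dat')
False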
"""
url = urllib.parse.urlparse(string)
# We can't just check that url.scheme is not an empty string, because
# file paths on Windows would return a non-empty scheme (e.g. e:\\
# returns 'e').
return url.scheme.lower() in ['http', 'https', 'ftp', 'sftp', 'ssh', 'file']
def _is_inside(path, parent_path):
# We have to try realpath too to avoid issues with symlinks, but we leave
# abspath because some systems like debian have the absolute path (with no
# symlinks followed) match, but the real directories in different
# locations, so need to try both cases.
return os.path.abspath(path).startswith(os.path.abspath(parent_path)) \
or os.path.realpath(path).startswith(os.path.realpath(parent_path))
@contextlib.contextmanager
def get_readable_fileobj(name_or_obj, encoding=None, cache=False,
show_progress=True, remote_timeout=None):
"""
Given a filename, pathlib.Path object or a readable file-like object, return a context
manager that yields a readable file-like object.
This supports passing filenames, URLs, and readable file-like objects,
any of which can be compressed in gzip, bzip2 or lzma (xz) if the
appropriate compression libraries are provided by the Python installation.
Notes
-----
This function is a context manager, and should be used for example
as::
with get_readable_fileobj('file.dat') as f:
contents = f.read()
Parameters
----------
name_or_obj : str or file-like object
The filename of the file to access (if given as a string), or
the file-like object to access.
If a file-like object, it must be opened in binary mode.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool, optional
Whether to cache the contents of remote URLs.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`, which is 10 s by default)
Returns
-------
file : readable file-like object
"""
# close_fds is a list of file handles created by this function
# that need to be closed. We don't want to always just close the
# returned file handle, because it may simply be the file handle
# passed in. In that case it is not the responsibility of this
# function to close it: doing so could result in a "double close"
# and an "invalid file descriptor" exception.
PATH_TYPES = (str, pathlib.Path)
close_fds = []
delete_fds = []
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
# Get a file object to the content
if isinstance(name_or_obj, PATH_TYPES):
# name_or_obj could be a pathlib.Path object; coerce it to a plain string
name_or_obj = str(name_or_obj)
is_url = _is_url(name_or_obj)
if is_url:
name_or_obj = download_file(
name_or_obj, cache=cache, show_progress=show_progress,
timeout=remote_timeout)
fileobj = io.FileIO(name_or_obj, 'r')
if is_url and not cache:
delete_fds.append(fileobj)
close_fds.append(fileobj)
else:
fileobj = name_or_obj
# Check if the file object supports random access, and if not,
# then wrap it in a BytesIO buffer. It would be nicer to use a
# BufferedReader to avoid loading the whole file into memory first,
# but that is not compatible with streams or objects returned by
# urllib.request.urlopen.
if not hasattr(fileobj, 'seek'):
fileobj = io.BytesIO(fileobj.read())
# Now read enough bytes to look at signature
signature = fileobj.read(4)
fileobj.seek(0)
if signature[:3] == b'\x1f\x8b\x08': # gzip
import struct
try:
import gzip
fileobj_new = gzip.GzipFile(fileobj=fileobj, mode='rb')
fileobj_new.read(1) # need to check that the file is really gzip
except (OSError, EOFError, struct.error): # invalid gzip file
fileobj.seek(0)
fileobj_new.close()
else:
fileobj_new.seek(0)
fileobj = fileobj_new
elif signature[:3] == b'BZh': # bzip2
try:
import bz2
except ImportError:
for fd in close_fds:
fd.close()
raise ValueError(
".bz2 format files are not supported since the Python "
"interpreter does not include the bz2 module")
try:
# bz2.BZ2File does not support file objects, only filenames, so we
# need to write the data to a temporary file
with NamedTemporaryFile("wb", delete=False) as tmp:
tmp.write(fileobj.read())
tmp.close()
fileobj_new = bz2.BZ2File(tmp.name, mode='rb')
fileobj_new.read(1) # need to check that the file is really bzip2
except OSError: # invalid bzip2 file
fileobj.seek(0)
fileobj_new.close()
# raise
else:
fileobj_new.seek(0)
close_fds.append(fileobj_new)
fileobj = fileobj_new
elif signature[:3] == b'\xfd7z': # xz
try:
import lzma
fileobj_new = lzma.LZMAFile(fileobj, mode='rb')
fileobj_new.read(1) # need to check that the file is really xz
except ImportError:
for fd in close_fds:
fd.close()
raise ValueError(
".xz format files are not supported since the Python "
"interpreter does not include the lzma module.")
except (OSError, EOFError) as e: # invalid xz file
fileobj.seek(0)
fileobj_new.close()
# should we propagate this to the caller to signal bad content?
# raise ValueError(e)
else:
fileobj_new.seek(0)
fileobj = fileobj_new
# By this point, we have a file, io.FileIO, gzip.GzipFile, bz2.BZ2File
# or lzma.LZMAFile instance opened in binary mode (that is, read
# returns bytes). Now we need to, if requested, wrap it in a
# io.TextIOWrapper so read will return unicode based on the
# encoding parameter.
needs_textio_wrapper = encoding != 'binary'
if needs_textio_wrapper:
# A bz2.BZ2File can not be wrapped by a TextIOWrapper,
# so we decompress it to a temporary file and then
# return a handle to that.
try:
import bz2
except ImportError:
pass
else:
if isinstance(fileobj, bz2.BZ2File):
tmp = NamedTemporaryFile("wb", delete=False)
data = fileobj.read()
tmp.write(data)
tmp.close()
delete_fds.append(tmp)
fileobj = io.FileIO(tmp.name, 'r')
close_fds.append(fileobj)
fileobj = io.BufferedReader(fileobj)
fileobj = io.TextIOWrapper(fileobj, encoding=encoding)
# Ensure that file is at the start - io.FileIO will for
# example not always be at the start:
# >>> import io
# >>> f = open('test.fits', 'rb')
# >>> f.read(4)
# 'SIMP'
# >>> f.seek(0)
# >>> fileobj = io.FileIO(f.fileno())
# >>> fileobj.tell()
# 4096L
fileobj.seek(0)
try:
yield fileobj
finally:
for fd in close_fds:
fd.close()
for fd in delete_fds:
os.remove(fd.name)
def get_file_contents(*args, **kwargs):
"""
Retrieves the contents of a filename or file-like object.
See the `get_readable_fileobj` docstring for details on parameters.
Returns
-------
content
The content of the file (as requested by ``encoding``).
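Examples
--------
A sketch of typical use; ``example.txt`` is a hypothetical local file::
>>> contents = get_file_contents('example.txt')  # doctest: +SKIP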
"""
with get_readable_fileobj(*args, **kwargs) as f:
return f.read()
@contextlib.contextmanager
def get_pkg_data_fileobj(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations for the package and
provides the file as a file-like object that reads bytes.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
fileobj : file-like
An object with the contents of the data file available via
``read`` function. Can be used as part of a ``with`` statement,
automatically closing itself after the ``with`` block.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Examples
--------
This will retrieve a data file and its contents for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('data/3d_cd.hdr',
... package='astropy.wcs.tests') as fobj:
... fcontents = fobj.read()
...
This next example would download a data file from the astropy data server
because the ``allsky/allsky_rosat.fits`` file is not present in the
source distribution. It will also save the file locally so the
next time it is accessed it won't need to be downloaded.::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary') as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
This does the same thing but does *not* cache it locally::
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary', cache=False) as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_filename : returns a local name for a file containing the data
"""
datafn = _find_pkg_data_path(data_name, package=package)
if os.path.isdir(datafn):
raise OSError("Tried to access a data file that's actually "
"a package data directory")
elif os.path.isfile(datafn): # local file
with get_readable_fileobj(datafn, encoding=encoding) as fileobj:
yield fileobj
else: # remote file
all_urls = (conf.dataurl, conf.dataurl_mirror)
for url in all_urls:
try:
with get_readable_fileobj(url + data_name, encoding=encoding,
cache=cache) as fileobj:
# We read a byte to trigger any URLErrors
fileobj.read(1)
fileobj.seek(0)
yield fileobj
break
except urllib.error.URLError:
pass
else:
urls = '\n'.join(' - {0}'.format(url) for url in all_urls)
raise urllib.error.URLError("Failed to download {0} from the following "
"repositories:\n\n{1}".format(data_name, urls))
def get_pkg_data_filename(data_name, package=None, show_progress=True,
remote_timeout=None):
"""
Retrieves a data file from the standard locations for the package and
provides a local filename for the data.
This function is similar to `get_pkg_data_fileobj` but returns the
file *name* instead of a readable file-like object. This means
that this function must always cache remote files locally, unlike
`get_pkg_data_fileobj`.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for the requests in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`, which
is 10 s by default)
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Returns
-------
filename : str
A file path on the local file system corresponding to the data
requested in ``data_name``.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('data/3d_cd.hdr',
... package='astropy.wcs.tests')
>>> with open(fn) as f:
... fcontents = f.read()
...
This retrieves a data file by hash either locally or from the astropy data
server::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28') # doctest: +SKIP
>>> with open(fn) as f:
... fcontents = f.read()
...
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_fileobj : returns a file-like object with the data
"""
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
if data_name.startswith('hash/'):
# first try looking for a local version if a hash is specified
hashfn = _find_hash_fn(data_name[5:])
if hashfn is None:
all_urls = (conf.dataurl, conf.dataurl_mirror)
for url in all_urls:
try:
return download_file(url + data_name, cache=True,
show_progress=show_progress,
timeout=remote_timeout)
except urllib.error.URLError:
pass
urls = '\n'.join(' - {0}'.format(url) for url in all_urls)
raise urllib.error.URLError("Failed to download {0} from the following "
"repositories:\n\n{1}\n\n".format(data_name, urls))
else:
return hashfn
else:
fs_path = os.path.normpath(data_name)
datafn = _find_pkg_data_path(fs_path, package=package)
if os.path.isdir(datafn):
raise OSError("Tried to access a data file that's actually "
"a package data directory")
elif os.path.isfile(datafn): # local file
return datafn
else: # remote file
all_urls = (conf.dataurl, conf.dataurl_mirror)
for url in all_urls:
try:
return download_file(url + data_name, cache=True,
show_progress=show_progress,
timeout=remote_timeout)
except urllib.error.URLError:
pass
urls = '\n'.join(' - {0}'.format(url) for url in all_urls)
raise urllib.error.URLError("Failed to download {0} from the following "
"repositories:\n\n{1}".format(data_name, urls))
def get_pkg_data_contents(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations and returns its
contents as a bytes object.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
* A URL to some other file.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
contents : bytes
The complete contents of the file as a bytes object.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
See Also
--------
get_pkg_data_fileobj : returns a file-like object with the data
get_pkg_data_filename : returns a local name for a file containing the data
"""
with get_pkg_data_fileobj(data_name, package=package, encoding=encoding,
cache=cache) as fd:
contents = fd.read()
return contents
def get_pkg_data_filenames(datadir, package=None, pattern='*'):
"""
Returns the path of all of the data files in a given directory
that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``.
* Remote URLs are not currently supported.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
Returns
-------
filenames : iterator of str
Paths on the local filesystem in *datadir* matching *pattern*.
Examples
--------
This will retrieve and read all of the ``*.hdr`` data files used by the
`astropy.wcs` tests::
>>> from astropy.utils.data import get_pkg_data_filenames
>>> for fn in get_pkg_data_filenames('maps', 'astropy.wcs.tests',
... '*.hdr'):
... with open(fn) as f:
... fcontents = f.read()
...
"""
path = _find_pkg_data_path(datadir, package=package)
if os.path.isfile(path):
raise OSError(
"Tried to access a data directory that's actually "
"a package data file")
elif os.path.isdir(path):
for filename in os.listdir(path):
if fnmatch.fnmatch(filename, pattern):
yield os.path.join(path, filename)
else:
raise OSError("Path not found")
def get_pkg_data_fileobjs(datadir, package=None, pattern='*', encoding=None):
"""
Returns readable file objects for all of the data files in a given
directory that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``
* Remote URLs are not currently supported
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
Returns
-------
fileobjs : iterator of file objects
File objects for each of the files on the local filesystem in
*datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_fileobjs
>>> for fd in get_pkg_data_fileobjs('maps', 'astropy.wcs.tests',
... '*.hdr'):
... fcontents = fd.read()
...
"""
for fn in get_pkg_data_filenames(datadir, package=package,
pattern=pattern):
with get_readable_fileobj(fn, encoding=encoding) as fd:
yield fd
def compute_hash(localfn):
""" Computes the MD5 hash for a file.
The hash for a data file is used for looking up data files in a unique
fashion. This is of particular use for tests; a test may require a
particular version of a particular file, in which case it can be accessed
via hash to get the appropriate version.
Typically, if you wish to write a test that requires a particular data
file, you will want to submit that file to the astropy data servers, and
use
e.g. ``get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28')``,
but with the hash for your file in place of the hash in the example.
Parameters
----------
localfn : str
The path to the file for which the hash should be generated.
Returns
-------
md5hash : str
The hex digest of the MD5 hash for the contents of the ``localfn``
file.
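Examples
--------
A minimal sketch; ``local_file.dat`` is an illustrative path, and the
digest shown depends entirely on the file's contents::
>>> from astropy.utils.data import compute_hash
>>> compute_hash('local_file.dat') # doctest: +SKIP
'34c33b3eb0d56eb9462003af249eff28'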
"""
with open(localfn, 'rb') as f:
h = hashlib.md5()
block = f.read(conf.compute_hash_block_size)
while block:
h.update(block)
block = f.read(conf.compute_hash_block_size)
return h.hexdigest()
def _find_pkg_data_path(data_name, package=None):
"""
Look for data in the source-included data directories and return the
path.
"""
if package is None:
module = find_current_module(1, finddiff=['astropy.utils.data', 'contextlib'])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return data_name
if not hasattr(module, '__package__') or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if '.' in module.__name__:
package = module.__name__.rpartition('.')[0]
else:
package = module.__name__
else:
package = module.__package__
else:
module = resolve_name(package)
rootpkgname = package.partition('.')[0]
rootpkg = resolve_name(rootpkgname)
module_path = os.path.dirname(module.__file__)
path = os.path.join(module_path, data_name)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(path, root_dir):
raise RuntimeError("attempted to get a local data file outside "
"of the {} tree.".format(rootpkgname))
return path
def _find_hash_fn(hash):
"""
Looks for a local file by hash; returns the file name if a valid file
is found, otherwise returns None.
"""
try:
dldir, urlmapfn = _get_download_cache_locs()
except OSError as e:
msg = 'Could not access cache directory to search for data file: '
warn(CacheMissingWarning(msg + str(e)))
return None
hashfn = os.path.join(dldir, hash)
if os.path.isfile(hashfn):
return hashfn
else:
return None
def get_free_space_in_dir(path):
"""
Given a path to a directory, returns the amount of free space (in
bytes) on that filesystem.
Parameters
----------
path : str
The path to a directory
Returns
-------
bytes : int
The amount of free space on the partition that the directory
is on.
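Examples
--------
For instance, to check the space available in the system temporary
directory (the value is machine-dependent, so the call is skipped in
doctests)::
>>> from tempfile import gettempdir
>>> get_free_space_in_dir(gettempdir()) # doctest: +SKIP
34985420800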
"""
if sys.platform.startswith('win'):
import ctypes
free_bytes = ctypes.c_ulonglong(0)
retval = ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p(path), None, None, ctypes.pointer(free_bytes))
if retval == 0:
raise OSError('Checking free space on {!r} failed '
'unexpectedly.'.format(path))
return free_bytes.value
else:
stat = os.statvfs(path)
return stat.f_bavail * stat.f_frsize
def check_free_space_in_dir(path, size):
"""
Determines if a given directory has enough space to hold a file of
a given size. Raises an OSError if the file would be too large.
Parameters
----------
path : str
The path to a directory
size : int
A proposed filesize (in bytes)
Raises
------
OSError
If there is not enough room on the filesystem.
"""
from ..utils.console import human_file_size
space = get_free_space_in_dir(path)
if space < size:
raise OSError(
"Not enough free space in '{0}' "
"to download a {1} file".format(
path, human_file_size(size)))
def download_file(remote_url, cache=False, show_progress=True, timeout=None):
"""
Accepts a URL, downloads the file, and optionally caches the result,
returning the local filename, with a name determined by the file's MD5
hash. If ``cache=True`` and the file is already present in the cache,
the cached filename is returned immediately.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool, optional
Whether to use the cache
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
The timeout, in seconds. If not given, defaults to
`astropy.utils.data.Conf.remote_timeout`.
Returns
-------
local_path : str
The local path that the file was downloaded to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
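Examples
--------
A minimal sketch; the URL is illustrative and the call needs network
access, so it is skipped in doctests::
>>> from astropy.utils.data import download_file
>>> path = download_file('http://data.astropy.org/intro.txt',
... cache=True) # doctest: +SKIP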
"""
from ..utils.console import ProgressBarOrSpinner
if timeout is None:
timeout = conf.remote_timeout
missing_cache = False
if cache:
try:
dldir, urlmapfn = _get_download_cache_locs()
except OSError as e:
msg = 'Remote data cache could not be accessed due to '
estr = '' if len(e.args) < 1 else (': ' + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
cache = False
missing_cache = True # indicates that the cache is missing to raise a warning later
url_key = remote_url
# Check whether the URL points at the Astropy data server, which can
# have an alias; if so, resolve the alias and cache it.
if (url_key.startswith(conf.dataurl) and
conf.dataurl not in _dataurls_to_alias):
with urllib.request.urlopen(conf.dataurl, timeout=timeout) as remote:
_dataurls_to_alias[conf.dataurl] = [conf.dataurl, remote.geturl()]
try:
if cache:
# We don't need to acquire the lock here, since we are only reading
with shelve.open(urlmapfn) as url2hash:
if url_key in url2hash:
return url2hash[url_key]
# If there is a cached copy from mirror, use it.
else:
for cur_url in _dataurls_to_alias.get(conf.dataurl, []):
if url_key.startswith(cur_url):
url_mirror = url_key.replace(cur_url,
conf.dataurl_mirror)
if url_mirror in url2hash:
return url2hash[url_mirror]
with urllib.request.urlopen(remote_url, timeout=timeout) as remote:
# keep a hash to rename the local file to the hashed name
hash = hashlib.md5()
info = remote.info()
if 'Content-Length' in info:
try:
size = int(info['Content-Length'])
except ValueError:
size = None
else:
size = None
if size is not None:
check_free_space_in_dir(gettempdir(), size)
if cache:
check_free_space_in_dir(dldir, size)
if show_progress and sys.stdout.isatty():
progress_stream = sys.stdout
else:
progress_stream = io.StringIO()
dlmsg = "Downloading {0}".format(remote_url)
with ProgressBarOrSpinner(size, dlmsg, file=progress_stream) as p:
with NamedTemporaryFile(delete=False) as f:
try:
bytes_read = 0
block = remote.read(conf.download_block_size)
while block:
f.write(block)
hash.update(block)
bytes_read += len(block)
p.update(bytes_read)
block = remote.read(conf.download_block_size)
except BaseException:
if os.path.exists(f.name):
os.remove(f.name)
raise
if cache:
_acquire_download_cache_lock()
try:
with shelve.open(urlmapfn) as url2hash:
# We check now to see if another process has
# inadvertently written the file underneath us
# already
if url_key in url2hash:
return url2hash[url_key]
local_path = os.path.join(dldir, hash.hexdigest())
shutil.move(f.name, local_path)
url2hash[url_key] = local_path
finally:
_release_download_cache_lock()
else:
local_path = f.name
if missing_cache:
msg = ('File downloaded to temporary location due to problem '
'with cache directory and will not be cached.')
warn(CacheMissingWarning(msg, local_path))
if conf.delete_temporary_downloads_at_exit:
global _tempfilestodel
_tempfilestodel.append(local_path)
except urllib.error.URLError as e:
if hasattr(e, 'reason') and hasattr(e.reason, 'errno') and e.reason.errno == 8:
e.reason.strerror = e.reason.strerror + '. requested URL: ' + remote_url
e.reason.args = (e.reason.errno, e.reason.strerror)
raise e
except socket.timeout as e:
# This isn't supposed to happen, but occasionally a socket.timeout gets
# through. It's supposed to be caught in `urllib` and raised in this
# way, but for some reason in mysterious circumstances it doesn't. So
# we'll just re-raise it here instead.
raise urllib.error.URLError(e)
return local_path
def is_url_in_cache(url_key):
"""
Check if a download from ``url_key`` is in the cache.
Parameters
----------
url_key : str
The URL retrieved
Returns
-------
in_cache : bool
`True` if a download from ``url_key`` is in the cache
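Examples
--------
The result depends on the state of the local cache, and the URL below
is illustrative, so the call is skipped in doctests::
>>> from astropy.utils.data import is_url_in_cache
>>> is_url_in_cache('http://data.astropy.org/intro.txt') # doctest: +SKIP
False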
"""
# The code below is modified from astropy.utils.data.download_file()
try:
dldir, urlmapfn = _get_download_cache_locs()
except OSError as e:
msg = 'Remote data cache could not be accessed due to '
estr = '' if len(e.args) < 1 else (': ' + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
return False
with shelve.open(urlmapfn) as url2hash:
if url_key in url2hash:
return True
return False
def _do_download_files_in_parallel(args):
return download_file(*args)
def download_files_in_parallel(urls, cache=True, show_progress=True,
timeout=None):
"""
Downloads multiple files in parallel from the given URLs. Blocks until
all files have downloaded. The result is a list of local file paths
corresponding to the given urls.
Parameters
----------
urls : list of str
The URLs to retrieve.
cache : bool, optional
Whether to use the cache (default is `True`).
.. versionchanged:: 3.0
The default was changed to ``True``. Setting it to ``False`` will
emit a warning and reset it to ``True``, because the function
does not work properly without the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`)
timeout : float, optional
Timeout for each individual request, in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`).
Returns
-------
paths : list of str
The local file paths corresponding to the downloaded URLs.
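Examples
--------
A minimal sketch; the URLs are illustrative and the call needs network
access, so it is skipped in doctests::
>>> from astropy.utils.data import download_files_in_parallel
>>> urls = ['http://data.astropy.org/intro.txt',
... 'http://data.astropy.org/tutorials/FITS-images/HorseHead.fits']
>>> paths = download_files_in_parallel(urls) # doctest: +SKIP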
"""
from .console import ProgressBar
if timeout is None:
timeout = conf.remote_timeout
if not cache:
# See issue #6662: on Windows this won't work, because the files are
# removed again before they can be used. On *NIX systems it behaves as
# if cache were set to True, because the multiprocessing workers cannot
# insert items into the parent's list of to-be-removed files.
warn("Disabling the cache does not work because of multiprocessing, it "
"will be set to ``True``. You may need to manually remove the "
"cached files afterwards.", AstropyWarning)
cache = True
if show_progress:
progress = sys.stdout
else:
progress = io.BytesIO()
# Combine duplicate URLs
combined_urls = list(set(urls))
combined_paths = ProgressBar.map(
_do_download_files_in_parallel,
[(x, cache, False, timeout) for x in combined_urls],
file=progress,
multiprocess=True)
paths = []
for url in urls:
paths.append(combined_paths[combined_urls.index(url)])
return paths
# This is used by download_file and _deltemps to determine the files to delete
# when the interpreter exits
_tempfilestodel = []
@atexit.register
def _deltemps():
global _tempfilestodel
if _tempfilestodel is not None:
while len(_tempfilestodel) > 0:
fn = _tempfilestodel.pop()
if os.path.isfile(fn):
os.remove(fn)
def clear_download_cache(hashorurl=None):
""" Clears the data file cache by deleting the local file(s).
Parameters
----------
hashorurl : str or None
If None, the whole cache is cleared. Otherwise, either specifies a
hash for the cached file that is supposed to be deleted, or a URL that
should be removed from the cache if present.
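Examples
--------
For instance, to drop a single cached download by URL, or to wipe the
whole cache (both skipped in doctests since they mutate local state)::
>>> from astropy.utils.data import clear_download_cache
>>> clear_download_cache('http://data.astropy.org/intro.txt') # doctest: +SKIP
>>> clear_download_cache() # doctest: +SKIP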
"""
try:
dldir, urlmapfn = _get_download_cache_locs()
except OSError as e:
msg = 'Not clearing data cache - cache inaccessible due to '
estr = '' if len(e.args) < 1 else (': ' + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
return
_acquire_download_cache_lock()
try:
if hashorurl is None:
# dldir includes both the download files and the urlmapfn. This structure
# is required since we cannot know a priori the actual file name corresponding
# to the shelve map named urlmapfn.
if os.path.exists(dldir):
shutil.rmtree(dldir)
else:
with shelve.open(urlmapfn) as url2hash:
filepath = os.path.join(dldir, hashorurl)
if not _is_inside(filepath, dldir):
raise RuntimeError("attempted to use clear_download_cache on"
" a path outside the data cache directory")
hash_key = hashorurl
if os.path.exists(filepath):
for k, v in url2hash.items():
if v == filepath:
del url2hash[k]
os.unlink(filepath)
elif hash_key in url2hash:
filepath = url2hash[hash_key]
del url2hash[hash_key]
if os.path.exists(filepath):
# Make sure the filepath still actually exists (perhaps user removed it)
os.unlink(filepath)
# Otherwise could not find file or url, but no worries.
# Clearing download cache just makes sure that the file or url
# is no longer in the cache regardless of starting condition.
finally:
# the lock will be gone if rmtree was used above, but release otherwise
if os.path.exists(os.path.join(dldir, 'lock')):
_release_download_cache_lock()
def _get_download_cache_locs():
""" Finds the path to the data cache directory and makes them if
they don't exist.
Returns
-------
datadir : str
The path to the data cache directory.
shelveloc : str
The path to the shelve object that stores the cache info.
"""
from ..config.paths import get_cache_dir
# datadir includes both the download files and the shelveloc. This structure
# is required since we cannot know a priori the actual file name corresponding
# to the shelve map named shelveloc. (The backend can vary and is allowed to
# do whatever it wants with the filename. Filename munging can and does happen
# in practice).
py_version = 'py' + str(sys.version_info.major)
datadir = os.path.join(get_cache_dir(), 'download', py_version)
shelveloc = os.path.join(datadir, 'urlmap')
if not os.path.exists(datadir):
try:
os.makedirs(datadir)
except OSError as e:
if not os.path.exists(datadir):
raise
elif not os.path.isdir(datadir):
msg = 'Data cache directory {0} is not a directory'
raise OSError(msg.format(datadir))
if os.path.isdir(shelveloc):
msg = 'Data cache shelve object location {0} is a directory'
raise OSError(msg.format(shelveloc))
return datadir, shelveloc
# the cache directory must be locked before any writes are performed. Same for
# the hash shelve, so this should be used for both.
def _acquire_download_cache_lock():
"""
Uses the lock directory method. This is good because `mkdir` is
atomic at the system call level, so lock acquisition is safe across
both threads and processes.
"""
lockdir = os.path.join(_get_download_cache_locs()[0], 'lock')
for i in range(conf.download_cache_lock_attempts):
try:
os.mkdir(lockdir)
# write the pid of this process for informational purposes
with open(os.path.join(lockdir, 'pid'), 'w') as f:
f.write(str(os.getpid()))
except OSError:
time.sleep(1)
else:
return
msg = ("Unable to acquire lock for cache directory ({0} exists). "
"You may need to delete the lock if the python interpreter wasn't "
"shut down properly.")
raise RuntimeError(msg.format(lockdir))
def _release_download_cache_lock():
lockdir = os.path.join(_get_download_cache_locs()[0], 'lock')
if os.path.isdir(lockdir):
# if the pid file is present, be sure to remove it
pidfn = os.path.join(lockdir, 'pid')
if os.path.exists(pidfn):
os.remove(pidfn)
os.rmdir(lockdir)
else:
msg = ('Error releasing lock. "{0}" either does not exist or is not '
'a directory.')
raise RuntimeError(msg.format(lockdir))
def get_cached_urls():
"""
Get the list of URLs in the cache. Especially useful for looking up what
files are stored in your cache when you don't have internet access.
Returns
-------
cached_urls : list
List of cached URLs.
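Examples
--------
The output depends on what has previously been downloaded, so the call
is skipped in doctests::
>>> from astropy.utils.data import get_cached_urls
>>> get_cached_urls() # doctest: +SKIP
['http://data.astropy.org/intro.txt']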
"""
# The code below is modified from astropy.utils.data.download_file()
try:
dldir, urlmapfn = _get_download_cache_locs()
except OSError as e:
msg = 'Remote data cache could not be accessed due to '
estr = '' if len(e.args) < 1 else (': ' + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
# Return an empty list, consistent with the documented return type
return []
with shelve.open(urlmapfn) as url2hash:
return list(url2hash.keys())
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A "grab bag" of relatively small general-purpose utilities that don't have
a clear module/package to live in.
"""
import abc
import contextlib
import difflib
import inspect
import json
import os
import signal
import sys
import traceback
import unicodedata
import locale
import threading
import re
from itertools import zip_longest
from contextlib import contextmanager
from collections import defaultdict, OrderedDict
__all__ = ['isiterable', 'silence', 'format_exception', 'NumpyRNGContext',
'find_api_page', 'is_path_hidden', 'walk_skip_hidden',
'JsonCustomEncoder', 'indent', 'InheritDocstrings',
'OrderedDescriptor', 'OrderedDescriptorContainer', 'set_locale',
'ShapedLikeNDArray', 'check_broadcast', 'IncompatibleShapeError',
'dtype_bytes_or_chars']
def isiterable(obj):
"""Returns `True` if the given object is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
def indent(s, shift=1, width=4):
"""Indent a block of text. The indentation is applied to each line."""
indented = '\n'.join(' ' * (width * shift) + l if l else ''
for l in s.splitlines())
if s and s[-1] == '\n':  # guard against IndexError on empty input
indented += '\n'
return indented
class _DummyFile:
"""A noop writeable object."""
def write(self, s):
pass
@contextlib.contextmanager
def silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
yield
sys.stdout = old_stdout
sys.stderr = old_stderr
def format_exception(msg, *args, **kwargs):
"""
Given an exception message string, uses new-style formatting arguments
``{filename}``, ``{lineno}``, ``{func}`` and/or ``{text}`` to fill in
information about the exception that occurred. For example:
try:
1/0
except:
raise ZeroDivisionError(
format_exception('A divide by zero occurred in {filename} at '
'line {lineno} of function {func}.'))
Any additional positional or keyword arguments passed to this function are
also used to format the message.
.. note::
This uses `sys.exc_info` to gather up the information needed to fill
in the formatting arguments. Since `sys.exc_info` is not carried
outside a handled exception, it's not wise to use this
outside of an ``except`` clause - if it is, this will substitute
'<unknown>' for the 4 formatting arguments.
"""
tb = traceback.extract_tb(sys.exc_info()[2], limit=1)
if len(tb) > 0:
filename, lineno, func, text = tb[0]
else:
filename = lineno = func = text = '<unknown>'
return msg.format(*args, filename=filename, lineno=lineno, func=func,
text=text, **kwargs)
class NumpyRNGContext:
"""
A context manager (for use with the ``with`` statement) that will seed the
numpy random number generator (RNG) to a specific value, and then restore
the RNG state back to whatever it was before.
This is primarily intended for use in the astropy testing suite, but it
may be useful in ensuring reproducibility of Monte Carlo simulations in a
science context.
Parameters
----------
seed : int
The value to use to seed the numpy RNG
Examples
--------
A typical use case might be::
with NumpyRNGContext(<some seed value you pick>):
from numpy import random
randarr = random.randn(100)
... run your test using `randarr` ...
# Any code using numpy.random at this indent level will act just as it
# would have if it had been before the with statement - e.g. whatever
# the default seed is.
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
from numpy import random
self.startstate = random.get_state()
random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
from numpy import random
random.set_state(self.startstate)
def find_api_page(obj, version=None, openinbrowser=True, timeout=None):
"""
Determines the URL of the API page for the specified object, and
optionally open that page in a web browser.
.. note::
You must be connected to the internet for this to function even if
``openinbrowser`` is `False`, unless you provide a local version of
the documentation to ``version`` (e.g., ``file:///path/to/docs``).
Parameters
----------
obj
The object to open the docs for or its fully-qualified name
(as a str).
version : str
The doc version - either a version number like '0.1', 'dev' for
the development/latest docs, or a URL to point to a specific
location that should be the *base* of the documentation. Defaults to
the latest documentation if you aren't on a release; otherwise,
defaults to the version you are on.
openinbrowser : bool
If `True`, the `webbrowser` package will be used to open the doc
page in a new web browser window.
timeout : number, optional
The number of seconds to wait before timing-out the query to
the astropy documentation. If not given, the default python
stdlib timeout will be used.
Returns
-------
url : str
The loaded URL
Raises
------
ValueError
If the documentation can't be found
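Examples
--------
A minimal sketch; this requires internet access, so it is skipped in
doctests::
>>> from astropy.utils.misc import find_api_page
>>> url = find_api_page('astropy.utils.misc.find_api_page',
... openinbrowser=False) # doctest: +SKIP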
"""
import webbrowser
import urllib.request
from zlib import decompress
if (not isinstance(obj, str) and
hasattr(obj, '__module__') and
hasattr(obj, '__name__')):
obj = obj.__module__ + '.' + obj.__name__
elif inspect.ismodule(obj):
obj = obj.__name__
if version is None:
from .. import version
if version.release:
version = 'v' + version.version
else:
version = 'dev'
if '://' in version:
if version.endswith('index.html'):
baseurl = version[:-10]
elif version.endswith('/'):
baseurl = version
else:
baseurl = version + '/'
elif version == 'dev' or version == 'latest':
baseurl = 'http://devdocs.astropy.org/'
else:
baseurl = 'http://docs.astropy.org/en/{vers}/'.format(vers=version)
if timeout is None:
uf = urllib.request.urlopen(baseurl + 'objects.inv')
else:
uf = urllib.request.urlopen(baseurl + 'objects.inv', timeout=timeout)
try:
oiread = uf.read()
# need to first read/remove the first four lines, which have info before
# the compressed section with the actual object inventory
idx = -1
headerlines = []
for _ in range(4):
oldidx = idx
idx = oiread.index(b'\n', oldidx + 1)
headerlines.append(oiread[(oldidx+1):idx].decode('utf-8'))
# intersphinx version line, project name, and project version
ivers, proj, vers, compr = headerlines
if 'The remainder of this file is compressed using zlib' not in compr:
raise ValueError('The file downloaded from {0} does not seem to be '
'the usual Sphinx objects.inv format. Maybe it '
'has changed?'.format(baseurl + 'objects.inv'))
compressed = oiread[(idx+1):]
finally:
uf.close()
decompressed = decompress(compressed).decode('utf-8')
resurl = None
for l in decompressed.strip().splitlines():
ls = l.split()
name = ls[0]
loc = ls[3]
if loc.endswith('$'):
loc = loc[:-1] + name
if name == obj:
resurl = baseurl + loc
break
if resurl is None:
raise ValueError('Could not find the docs for the object {obj}'.format(obj=obj))
elif openinbrowser:
webbrowser.open(resurl)
return resurl
def signal_number_to_name(signum):
"""
Given an OS signal number, returns a signal name. If the signal
number is unknown, returns ``'UNKNOWN'``.
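For example (signal numbers are platform-specific, though 2 is
``SIGINT`` on common platforms)::
>>> signal_number_to_name(2) # doctest: +SKIP
'SIGINT'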
"""
# Since these numbers and names are platform specific, we use the
# builtin signal module and build a reverse mapping.
signal_to_name_map = dict((k, v) for v, k in signal.__dict__.items()
if v.startswith('SIG'))
return signal_to_name_map.get(signum, 'UNKNOWN')
if sys.platform == 'win32':
import ctypes
def _has_hidden_attribute(filepath):
"""
Returns True if the given filepath has the hidden attribute on
MS-Windows. Based on a post here:
http://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection
"""
if isinstance(filepath, bytes):
filepath = filepath.decode(sys.getfilesystemencoding())
try:
attrs = ctypes.windll.kernel32.GetFileAttributesW(filepath)
result = bool(attrs & 2) and attrs != -1
except AttributeError:
result = False
return result
else:
def _has_hidden_attribute(filepath):
return False
def is_path_hidden(filepath):
"""
Determines if a given file or directory is hidden.
Parameters
----------
filepath : str
The path to a file or directory
Returns
-------
hidden : bool
Returns `True` if the file is hidden
"""
name = os.path.basename(os.path.abspath(filepath))
if isinstance(name, bytes):
is_dotted = name.startswith(b'.')
else:
is_dotted = name.startswith('.')
return is_dotted or _has_hidden_attribute(filepath)
def walk_skip_hidden(top, onerror=None, followlinks=False):
"""
A wrapper for `os.walk` that skips hidden files and directories.
This function does not have the parameter ``topdown`` from
`os.walk`: the directories must always be recursed top-down when
using this function.
See also
--------
os.walk : For a description of the parameters
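Examples
--------
A minimal sketch; the output depends on the directory contents::
>>> for root, dirs, files in walk_skip_hidden('.'): # doctest: +SKIP
... print(root, files)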
"""
for root, dirs, files in os.walk(
top, topdown=True, onerror=onerror,
followlinks=followlinks):
# These lists must be updated in-place so os.walk will skip
# hidden directories
dirs[:] = [d for d in dirs if not is_path_hidden(d)]
files[:] = [f for f in files if not is_path_hidden(f)]
yield root, dirs, files
class JsonCustomEncoder(json.JSONEncoder):
"""Support for data types that JSON default encoder
does not do.
This includes:
* Numpy array or number
* Complex number
* Set
* Bytes
* astropy.UnitBase
* astropy.Quantity
Examples
--------
>>> import json
>>> import numpy as np
>>> from astropy.utils.misc import JsonCustomEncoder
>>> json.dumps(np.arange(3), cls=JsonCustomEncoder)
'[0, 1, 2]'
"""
def default(self, obj):
from .. import units as u
import numpy as np
if isinstance(obj, u.Quantity):
return dict(value=obj.value, unit=obj.unit.to_string())
if isinstance(obj, (np.number, np.ndarray)):
return obj.tolist()
elif isinstance(obj, complex):
return [obj.real, obj.imag]
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, bytes): # pragma: py3
return obj.decode()
elif isinstance(obj, (u.UnitBase, u.FunctionUnitBase)):
if obj == u.dimensionless_unscaled:
obj = 'dimensionless_unit'
else:
return obj.to_string()
return json.JSONEncoder.default(self, obj)
def strip_accents(s):
"""
Remove accents from a Unicode string.
This helps with matching "ångström" to "angstrom", for example.
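For example::
>>> strip_accents('ångström')
'angstrom'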
"""
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def did_you_mean(s, candidates, n=3, cutoff=0.8, fix=None):
"""
When a string isn't found in a set of candidates, we can be nice and
provide a list of alternatives in the exception. This convenience
function helps to format that part of the exception.
Parameters
----------
s : str
candidates : sequence of str or dict of str keys
n : int
The maximum number of results to include. See
`difflib.get_close_matches`.
cutoff : float
In the range [0, 1]. Possibilities that don't score at least
that similar to word are ignored. See
`difflib.get_close_matches`.
fix : callable
A callable to modify the results after matching. It should
take a single string and return a sequence of strings
containing the fixed matches.
Returns
-------
message : str
Returns the string "Did you mean X, Y, or Z?", or the empty
string if no alternatives were found.
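Examples
--------
A simple case with a single close match::
>>> did_you_mean('splot', ['plot', 'scatter'])
'Did you mean plot?'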
"""
if isinstance(s, str):
s = strip_accents(s)
s_lower = s.lower()
# Create a mapping from the lower case name to all capitalization
# variants of that name.
candidates_lower = {}
for candidate in candidates:
candidate_lower = candidate.lower()
candidates_lower.setdefault(candidate_lower, [])
candidates_lower[candidate_lower].append(candidate)
# The heuristic here is to first try "singularizing" the word. If
# that doesn't match anything use difflib to find close matches in
# original, lower and upper case.
if s_lower.endswith('s') and s_lower[:-1] in candidates_lower:
matches = [s_lower[:-1]]
else:
matches = difflib.get_close_matches(
s_lower, candidates_lower, n=n, cutoff=cutoff)
if len(matches):
capitalized_matches = set()
for match in matches:
capitalized_matches.update(candidates_lower[match])
matches = capitalized_matches
if fix is not None:
mapped_matches = []
for match in matches:
mapped_matches.extend(fix(match))
matches = mapped_matches
matches = list(set(matches))
matches = sorted(matches)
if len(matches) == 1:
matches = matches[0]
else:
matches = (', '.join(matches[:-1]) + ' or ' +
matches[-1])
return 'Did you mean {0}?'.format(matches)
return ''
class InheritDocstrings(type):
"""
This metaclass makes methods of a class automatically have their
docstrings filled in from the methods they override in the base
class.
If the class uses multiple inheritance, the docstring will be
chosen from the first class in the bases list, in the same way as
methods are normally resolved in Python. If this results in
selecting the wrong docstring, the docstring will need to be
explicitly included on the method.
For example::
>>> from astropy.utils.misc import InheritDocstrings
>>> class A(metaclass=InheritDocstrings):
... def wiggle(self):
... "Wiggle the thingamajig"
... pass
>>> class B(A):
... def wiggle(self):
... pass
>>> B.wiggle.__doc__
'Wiggle the thingamajig'
"""
def __init__(cls, name, bases, dct):
def is_public_member(key):
return (
(key.startswith('__') and key.endswith('__')
and len(key) > 4) or
not key.startswith('_'))
for key, val in dct.items():
if ((inspect.isfunction(val) or inspect.isdatadescriptor(val)) and
is_public_member(key) and
val.__doc__ is None):
for base in cls.__mro__[1:]:
super_method = getattr(base, key, None)
if super_method is not None:
val.__doc__ = super_method.__doc__
break
super().__init__(name, bases, dct)
class OrderedDescriptor(metaclass=abc.ABCMeta):
"""
Base class for descriptors whose order in the class body should be
preserved. Intended for use in concert with the
`OrderedDescriptorContainer` metaclass.
Subclasses of `OrderedDescriptor` must define a value for a class attribute
called ``_class_attribute_``. This is the name of a class attribute on the
*container* class for these descriptors, which will be set to an
`~collections.OrderedDict` at class creation time. This
`~collections.OrderedDict` will contain a mapping of all class attributes
that were assigned instances of the `OrderedDescriptor` subclass, to the
instances themselves. See the documentation for
`OrderedDescriptorContainer` for a concrete example.
Optionally, subclasses of `OrderedDescriptor` may define a value for a
class attribute called ``_name_attribute_``. This should be the name of
an attribute on instances of the subclass. When specified, during
creation of a class containing these descriptors, the name attribute on
each instance will be set to the name of the class attribute it was
assigned to on the class.
.. note::
Although this class is intended for use with *descriptors* (i.e.
classes that define any of the ``__get__``, ``__set__``, or
``__delete__`` magic methods), this base class is not itself a
descriptor, and technically this could be used for classes that are
not descriptors too. However, use with descriptors is the original
intended purpose.
"""
# This id increments for each OrderedDescriptor instance created, so they
# are always ordered in the order they were created. Class bodies are
# guaranteed to be executed from top to bottom. Not sure if this is
# thread-safe though.
_nextid = 1
@property
@abc.abstractmethod
def _class_attribute_(self):
"""
Subclasses should define this attribute to the name of an attribute on
classes containing this subclass. That attribute will contain the mapping
of all instances of that `OrderedDescriptor` subclass defined in the class
body. If the same descriptor needs to be used with different classes,
each with different names of this attribute, multiple subclasses will be
needed.
"""
_name_attribute_ = None
"""
Subclasses may optionally define this attribute to specify the name of an
attribute on instances of the class that should be filled with the
instance's attribute name at class creation time.
"""
def __init__(self, *args, **kwargs):
# The _nextid attribute is shared across all subclasses so that
# different subclasses of OrderedDescriptors can be sorted correctly
# between themselves
self.__order = OrderedDescriptor._nextid
OrderedDescriptor._nextid += 1
super().__init__()
def __lt__(self, other):
"""
Defined for convenient sorting of `OrderedDescriptor` instances, which
are defined to sort in their creation order.
"""
if (isinstance(self, OrderedDescriptor) and
isinstance(other, OrderedDescriptor)):
try:
return self.__order < other.__order
except AttributeError:
raise RuntimeError(
'Could not determine ordering for {0} and {1}; at least '
'one of them is not calling super().__init__ in its '
'__init__.'.format(self, other))
else:
return NotImplemented
class OrderedDescriptorContainer(type):
"""
Classes should use this metaclass if they wish to use `OrderedDescriptor`
attributes, which are class attributes that "remember" the order in which
they were defined in the class body.
Every subclass of `OrderedDescriptor` has an attribute called
``_class_attribute_``. For example, if we have
.. code:: python
class ExampleDecorator(OrderedDescriptor):
_class_attribute_ = '_examples_'
Then when a class with the `OrderedDescriptorContainer` metaclass is
created, it will automatically be assigned a class attribute ``_examples_``
referencing an `~collections.OrderedDict` containing all instances of
``ExampleDecorator`` defined in the class body, mapped to by the names of
the attributes they were assigned to.
When subclassing a class with this metaclass, the descriptor dict (i.e.
``_examples_`` in the above example) will *not* contain descriptors
inherited from the base class. That is, this only works by default with
decorators explicitly defined in the class body. However, the subclass
*may* define an attribute ``_inherit_decorators_`` which lists
`OrderedDescriptor` classes that *should* be added from base classes.
See the examples section below for an example of this.
Examples
--------
>>> from astropy.utils import OrderedDescriptor, OrderedDescriptorContainer
>>> class TypedAttribute(OrderedDescriptor):
... \"\"\"
... Attributes that may only be assigned objects of a specific type,
... or subclasses thereof. For some reason we care about their order.
... \"\"\"
...
... _class_attribute_ = 'typed_attributes'
... _name_attribute_ = 'name'
... # A default name so that instances not attached to a class can
... # still be repr'd; useful for debugging
... name = '<unbound>'
...
... def __init__(self, type):
... # Make sure not to forget to call the super __init__
... super().__init__()
... self.type = type
...
... def __get__(self, obj, objtype=None):
... if obj is None:
... return self
... if self.name in obj.__dict__:
... return obj.__dict__[self.name]
... else:
... raise AttributeError(self.name)
...
... def __set__(self, obj, value):
... if not isinstance(value, self.type):
... raise ValueError('{0}.{1} must be of type {2!r}'.format(
... obj.__class__.__name__, self.name, self.type))
... obj.__dict__[self.name] = value
...
... def __delete__(self, obj):
... if self.name in obj.__dict__:
... del obj.__dict__[self.name]
... else:
... raise AttributeError(self.name)
...
... def __repr__(self):
... if isinstance(self.type, tuple) and len(self.type) > 1:
... typestr = '({0})'.format(
... ', '.join(t.__name__ for t in self.type))
... else:
... typestr = self.type.__name__
... return '<{0}(name={1}, type={2})>'.format(
... self.__class__.__name__, self.name, typestr)
...
Now let's create an example class that uses this ``TypedAttribute``::
>>> class Point2D(metaclass=OrderedDescriptorContainer):
... x = TypedAttribute((float, int))
... y = TypedAttribute((float, int))
...
... def __init__(self, x, y):
... self.x, self.y = x, y
...
>>> p1 = Point2D(1.0, 2.0)
>>> p1.x
1.0
>>> p1.y
2.0
>>> p2 = Point2D('a', 'b') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Point2D.x must be of type (float, int)
We see that ``TypedAttribute`` works more or less as advertised, but
there's nothing special about that. Let's see what
`OrderedDescriptorContainer` did for us::
>>> Point2D.typed_attributes
OrderedDict([('x', <TypedAttribute(name=x, type=(float, int))>),
('y', <TypedAttribute(name=y, type=(float, int))>)])
If we create a subclass, it does *not* by default add inherited descriptors
to ``typed_attributes``::
>>> class Point3D(Point2D):
... z = TypedAttribute((float, int))
...
>>> Point3D.typed_attributes
OrderedDict([('z', <TypedAttribute(name=z, type=(float, int))>)])
However, if we specify ``_inherit_descriptors_`` from ``Point2D`` then
it will do so::
>>> class Point3D(Point2D):
... _inherit_descriptors_ = (TypedAttribute,)
... z = TypedAttribute((float, int))
...
>>> Point3D.typed_attributes
OrderedDict([('x', <TypedAttribute(name=x, type=(float, int))>),
('y', <TypedAttribute(name=y, type=(float, int))>),
('z', <TypedAttribute(name=z, type=(float, int))>)])
.. note::
Hopefully it is clear from these examples that this construction
also allows a class of type `OrderedDescriptorContainer` to use
multiple different `OrderedDescriptor` classes simultaneously.
"""
_inherit_descriptors_ = ()
def __init__(cls, cls_name, bases, members):
descriptors = defaultdict(list)
seen = set()
inherit_descriptors = ()
descr_bases = {}
for mro_cls in cls.__mro__:
for name, obj in mro_cls.__dict__.items():
if name in seen:
# Checks if we've already seen an attribute of the given
# name (if so it will override anything of the same name in
# any base class)
continue
seen.add(name)
if (not isinstance(obj, OrderedDescriptor) or
(inherit_descriptors and
not isinstance(obj, inherit_descriptors))):
# The second condition applies when checking classes further
# along the MRO, to see if we can inherit any descriptors of
# the given type from base classes (by default inheritance is
# disabled unless the class has _inherit_descriptors_
# defined)
continue
if obj._name_attribute_ is not None:
setattr(obj, obj._name_attribute_, name)
# Don't just use the descriptor's class directly; instead go
# through its MRO and find the class on which _class_attribute_
# is defined directly. This way subclasses of some
# OrderedDescriptor *may* override _class_attribute_ and have
# its own _class_attribute_, but by default all subclasses of
# some OrderedDescriptor are still grouped together
# TODO: It might be worth clarifying this in the docs
if obj.__class__ not in descr_bases:
for obj_cls_base in obj.__class__.__mro__:
if '_class_attribute_' in obj_cls_base.__dict__:
descr_bases[obj.__class__] = obj_cls_base
descriptors[obj_cls_base].append((obj, name))
break
else:
# Make sure to put obj first for sorting purposes
obj_cls_base = descr_bases[obj.__class__]
descriptors[obj_cls_base].append((obj, name))
if not getattr(mro_cls, '_inherit_descriptors_', False):
# If _inherit_descriptors_ is undefined then we don't inherit
# any OrderedDescriptors from any of the base classes, and
# there's no reason to continue through the MRO
break
else:
inherit_descriptors = mro_cls._inherit_descriptors_
for descriptor_cls, instances in descriptors.items():
instances.sort()
instances = OrderedDict((key, value) for value, key in instances)
setattr(cls, descriptor_cls._class_attribute_, instances)
super().__init__(cls_name, bases, members)
LOCALE_LOCK = threading.Lock()
@contextmanager
def set_locale(name):
"""
Context manager to temporarily set the locale to ``name``.
An example is setting locale to "C" so that the C strtod()
function will use "." as the decimal point to enable consistent
numerical string parsing.
Note that one cannot nest multiple set_locale() context manager
statements, as this would deadlock on the (non-reentrant) threading
lock.
This code is taken from https://stackoverflow.com/questions/18593661/how-do-i-strftime-a-date-object-in-a-different-locale.
Parameters
----------
name : str
Locale name, e.g. "C" or "fr_FR".
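Examples
--------
A minimal sketch; which locales are installed is platform-dependent,
so the call is skipped in doctests::
>>> import locale
>>> with set_locale('C'): # doctest: +SKIP
... locale.str(1.23)
'1.23'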
"""
name = str(name)
with LOCALE_LOCK:
saved = locale.setlocale(locale.LC_ALL)
if saved == name:
# Don't do anything if locale is already the requested locale
yield
else:
try:
locale.setlocale(locale.LC_ALL, name)
yield
finally:
locale.setlocale(locale.LC_ALL, saved)
class ShapedLikeNDArray(metaclass=abc.ABCMeta):
"""Mixin class to provide shape-changing methods.
The class proper is assumed to have some underlying data, which are arrays
or array-like structures. It must define a ``shape`` property, which gives
the shape of those data, as well as an ``_apply`` method that creates a new
instance in which a `~numpy.ndarray` method has been applied to those.
Furthermore, for consistency with `~numpy.ndarray`, it is recommended to
define a setter for the ``shape`` property, which, like the
`~numpy.ndarray.shape` property allows in-place reshaping the internal data
(and, unlike the ``reshape`` method raises an exception if this is not
possible).
This class also defines default implementations for ``ndim`` and ``size``
properties, calculating those from the ``shape``. These can be overridden
by subclasses if there are faster ways to obtain those numbers.
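Examples
--------
A minimal sketch of a subclass (illustrative, not an actual astropy
class) that wraps a single `~numpy.ndarray`::
>>> import numpy as np
>>> class ArrayWrapper(ShapedLikeNDArray):
...     def __init__(self, data):
...         self._data = np.asarray(data)
...     @property
...     def shape(self):
...         return self._data.shape
...     def _apply(self, method, *args, **kwargs):
...         return self.__class__(
...             getattr(self._data, method)(*args, **kwargs))
>>> a = ArrayWrapper([[1, 2, 3], [4, 5, 6]])
>>> a.reshape(3, 2).shape
(3, 2)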
"""
# Note to developers: if new methods are added here, be sure to check that
# they work properly with the classes that use this, such as Time and
# BaseRepresentation, i.e., look at their ``_apply`` methods and add
# relevant tests. This is particularly important for methods that imply
# copies rather than views of data (see the special-case treatment of
# 'flatten' in Time).
@property
@abc.abstractmethod
def shape(self):
"""The shape of the instance and underlying arrays."""
@abc.abstractmethod
def _apply(self, method, *args, **kwargs):
"""Create a new instance, with ``method`` applied to underlying data.
The method is any of the shape-changing methods for `~numpy.ndarray`
(``reshape``, ``swapaxes``, etc.), as well as those picking particular
elements (``__getitem__``, ``take``, etc.). It will be applied to the
underlying arrays (e.g., ``jd1`` and ``jd2`` in `~astropy.time.Time`),
with the results used to create a new instance.
Parameters
----------
method : str
Method to be applied to the instance's internal data arrays.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
"""
@property
def ndim(self):
"""The number of dimensions of the instance and underlying arrays."""
return len(self.shape)
@property
def size(self):
"""The size of the object, as calculated from its shape."""
size = 1
for sh in self.shape:
size *= sh
return size
@property
def isscalar(self):
return self.shape == ()
def __len__(self):
if self.isscalar:
raise TypeError("Scalar {0!r} object has no len()"
.format(self.__class__.__name__))
return self.shape[0]
def __bool__(self):
"""Any instance should evaluate to True, except when it is empty."""
return self.size > 0
def __getitem__(self, item):
try:
return self._apply('__getitem__', item)
except IndexError:
if self.isscalar:
raise TypeError('scalar {0!r} object is not subscriptable.'
.format(self.__class__.__name__))
else:
raise
def __iter__(self):
if self.isscalar:
raise TypeError('scalar {0!r} object is not iterable.'
.format(self.__class__.__name__))
# We cannot just write a generator here, since then the above error
# would only be raised once we try to use the iterator, rather than
# upon its definition using iter(self).
def self_iter():
for idx in range(len(self)):
yield self[idx]
return self_iter()
def copy(self, *args, **kwargs):
"""Return an instance containing copies of the internal data.
Parameters are as for :meth:`~numpy.ndarray.copy`.
"""
return self._apply('copy', *args, **kwargs)
def reshape(self, *args, **kwargs):
"""Returns an instance containing the same data with a new shape.
Parameters are as for :meth:`~numpy.ndarray.reshape`. Note that it is
not always possible to change the shape of an array without copying the
data (see :func:`~numpy.reshape` documentation). If you want an error
to be raised if the data is copied, you should assign the new shape to
the shape attribute (note: this may not be implemented for all classes
using ``ShapedLikeNDArray``).
"""
return self._apply('reshape', *args, **kwargs)
def ravel(self, *args, **kwargs):
"""Return an instance with the array collapsed into one dimension.
Parameters are as for :meth:`~numpy.ndarray.ravel`. Note that it is
not always possible to unravel an array without copying the data.
If you want an error to be raised if the data is copied, you should
assign shape ``(-1,)`` to the shape attribute.
"""
return self._apply('ravel', *args, **kwargs)
def flatten(self, *args, **kwargs):
"""Return a copy with the array collapsed into one dimension.
Parameters are as for :meth:`~numpy.ndarray.flatten`.
"""
return self._apply('flatten', *args, **kwargs)
def transpose(self, *args, **kwargs):
"""Return an instance with the data transposed.
Parameters are as for :meth:`~numpy.ndarray.transpose`. All internal
data are views of the data of the original.
"""
return self._apply('transpose', *args, **kwargs)
@property
def T(self):
"""Return an instance with the data transposed.
Parameters are as for :attr:`~numpy.ndarray.T`. All internal
data are views of the data of the original.
"""
if self.ndim < 2:
return self
else:
return self.transpose()
def swapaxes(self, *args, **kwargs):
"""Return an instance with the given axes interchanged.
Parameters are as for :meth:`~numpy.ndarray.swapaxes`:
``axis1, axis2``. All internal data are views of the data of the
original.
"""
return self._apply('swapaxes', *args, **kwargs)
def diagonal(self, *args, **kwargs):
"""Return an instance with the specified diagonals.
Parameters are as for :meth:`~numpy.ndarray.diagonal`. All internal
data are views of the data of the original.
"""
return self._apply('diagonal', *args, **kwargs)
def squeeze(self, *args, **kwargs):
"""Return an instance with single-dimensional shape entries removed
Parameters are as for :meth:`~numpy.ndarray.squeeze`. All internal
data are views of the data of the original.
"""
return self._apply('squeeze', *args, **kwargs)
def take(self, indices, axis=None, mode='raise'):
"""Return a new instance formed from the elements at the given indices.
Parameters are as for :meth:`~numpy.ndarray.take`, except that,
obviously, no output array can be given.
"""
return self._apply('take', indices, axis=axis, mode=mode)
class IncompatibleShapeError(ValueError):
def __init__(self, shape_a, shape_a_idx, shape_b, shape_b_idx):
super().__init__(shape_a, shape_a_idx, shape_b, shape_b_idx)
def check_broadcast(*shapes):
"""
Determines whether two or more Numpy arrays can be broadcast with each
other based on their shape tuple alone.
Parameters
----------
*shapes : tuple
All shapes to include in the comparison. If only one shape is given it
is passed through unmodified. If no shapes are given returns an empty
`tuple`.
Returns
-------
broadcast : `tuple`
If all shapes are mutually broadcastable, returns a tuple of the full
broadcast shape.
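Examples
--------
For example, shapes ``(2, 1)`` and ``(1, 3)`` broadcast against each
other, while incompatible shapes raise `IncompatibleShapeError`::
>>> check_broadcast((2, 1), (1, 3))
(2, 3)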
"""
if len(shapes) == 0:
return ()
elif len(shapes) == 1:
return shapes[0]
reversed_shapes = (reversed(shape) for shape in shapes)
full_shape = []
for dims in zip_longest(*reversed_shapes, fillvalue=1):
max_dim = 1
max_dim_idx = None
for idx, dim in enumerate(dims):
if dim == 1:
continue
if max_dim == 1:
# The first dimension of size greater than 1
max_dim = dim
max_dim_idx = idx
elif dim != max_dim:
raise IncompatibleShapeError(
shapes[max_dim_idx], max_dim_idx, shapes[idx], idx)
full_shape.append(max_dim)
return tuple(full_shape[::-1])
def dtype_bytes_or_chars(dtype):
"""
Parse the number out of a dtype.str value like '<U5' or '<f8'.
See #5819 for discussion on the need for this function for getting
the number of characters corresponding to a string dtype.
Parameters
----------
dtype : numpy dtype object
Input dtype
Returns
-------
bytes_or_chars : int or None
Bytes (for numeric types) or characters (for string types).
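Examples
--------
For example::
>>> import numpy as np
>>> dtype_bytes_or_chars(np.dtype(np.float64))
8
>>> dtype_bytes_or_chars(np.dtype('U5'))
5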
"""
match = re.search(r'(\d+)$', dtype.str)
out = int(match.group(1)) if match else None
return out
def pizza(): # pragma: no cover
"""
Open browser loaded with pizza options near you.
*Disclaimers: Payments not included. Astropy is not
responsible for any liability from using this function.*
.. note:: Accuracy depends on your browser settings.
"""
import webbrowser
webbrowser.open('https://www.google.com/search?q=pizza+near+me')
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
__doctest_skip__ = ['quantity_support']
def quantity_support(format='latex_inline'):
"""
Enable support for plotting `astropy.units.Quantity` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
>>> import matplotlib.pyplot as plt
>>> from astropy import units as u
>>> from astropy import visualization
>>> with visualization.quantity_support():
... plt.figure()
... plt.plot([1, 2, 3] * u.m)
[...]
... plt.plot([101, 125, 150] * u.cm)
[...]
... plt.draw()
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to ``latex_inline``.
"""
from .. import units as u
from matplotlib import units
from matplotlib import ticker
def rad_fn(x, pos=None):
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return '0'
elif n == 1:
return 'π/2'
elif n == 2:
return 'π'
elif n % 2 == 0:
return '{0}π'.format(n // 2)  # integer division avoids e.g. '2.0π'
else:
return '{0}π/2'.format(n)
class MplQuantityConverter(units.ConversionInterface):
def __init__(self):
if u.Quantity not in units.registry:
units.registry[u.Quantity] = self
self._remove = True
else:
self._remove = False
@staticmethod
def axisinfo(unit, axis):
if unit == u.radian:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi/2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.to_string(),
)
elif unit == u.degree:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter('%i°'),
label=unit.to_string(),
)
elif unit is not None:
return units.AxisInfo(label=unit.to_string(format))
return None
@staticmethod
def convert(val, unit, axis):
if isinstance(val, u.Quantity):
return val.to_value(unit)
elif isinstance(val, list) and val and isinstance(val[0], u.Quantity):
return [v.to_value(unit) for v in val]
else:
return val
@staticmethod
def default_units(x, axis):
if hasattr(x, 'unit'):
return x.unit
return None
def __enter__(self):
return self
def __exit__(self, type, value, tb):
if self._remove:
del units.registry[u.Quantity]
return MplQuantityConverter()
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements the base CCDData class."""
import numpy as np
from .compat import NDDataArray
from .nduncertainty import StdDevUncertainty, NDUncertainty
from ..io import fits, registry
from .. import units as u
from .. import log
from ..wcs import WCS
from ..utils.decorators import sharedmethod
__all__ = ['CCDData', 'fits_ccddata_reader', 'fits_ccddata_writer']
# Global value which can turn on/off the unit requirements when creating a
# CCDData. Should be used with care because several functions actually break
# if the unit is None!
_config_ccd_requires_unit = True
def _arithmetic(op):
"""Decorator factory which temporarly disables the need for a unit when
creating a new CCDData instance. The final result must have a unit.
Parameters
----------
op : function
The function to apply. Supported are:
- ``np.add``
- ``np.subtract``
- ``np.multiply``
- ``np.true_divide``
Notes
-----
Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
``multiply`` because only these methods from NDArithmeticMixin are
overwritten.
"""
def decorator(func):
def inner(self, operand, operand2=None, **kwargs):
global _config_ccd_requires_unit
_config_ccd_requires_unit = False
result = self._prepare_then_do_arithmetic(op, operand,
operand2, **kwargs)
# Wrap it again as CCDData so it checks the final unit.
_config_ccd_requires_unit = True
return result.__class__(result)
inner.__doc__ = ("See `astropy.nddata.NDArithmeticMixin.{}`."
"".format(func.__name__))
return sharedmethod(inner)
return decorator
class CCDData(NDDataArray):
"""A class describing basic CCD data.
The CCDData class is based on the NDData object and includes a data array,
uncertainty frame, mask frame, flag frame, meta data, units, and WCS
information for a single CCD image.
Parameters
----------
data : `~astropy.nddata.CCDData`-like or `numpy.ndarray`-like
The actual data contained in this `~astropy.nddata.CCDData` object.
Note that the data will always be saved by *reference*, so you should
make a copy of the ``data`` before passing it in if that's the desired
behavior.
uncertainty : `~astropy.nddata.StdDevUncertainty`, `numpy.ndarray` or \
None, optional
Uncertainties on the data.
Default is ``None``.
mask : `numpy.ndarray` or None, optional
Mask for the data, given as a boolean Numpy array with a shape
matching that of the data. The values must be `False` where
the data is *valid* and `True` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
``mask`` here will cause the mask from the masked array to be
ignored.
Default is ``None``.
flags : `numpy.ndarray` or `~astropy.nddata.FlagCollection` or None, \
optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
Default is ``None``.
wcs : `~astropy.wcs.WCS` or None, optional
WCS-object containing the world coordinate system for the data.
Default is ``None``.
meta : dict-like object or None, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object, e.g. creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.Unit` or str, optional
The units of the data.
Default is ``None``.
.. warning::
If the unit is ``None`` or not otherwise specified it will raise a
``ValueError``
Raises
------
ValueError
If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g.,
match shape) onto ``data``.
Methods
-------
read(\\*args, \\**kwargs)
``Classmethod`` to create a CCDData instance based on a ``FITS`` file.
This method uses :func:`fits_ccddata_reader` with the provided
parameters.
write(\\*args, \\**kwargs)
Writes the contents of the CCDData instance into a new ``FITS`` file.
This method uses :func:`fits_ccddata_writer` with the provided
parameters.
Notes
-----
`~astropy.nddata.CCDData` objects can be easily converted to a regular
Numpy array using `numpy.asarray`.
For example::
>>> from astropy.nddata import CCDData
>>> import numpy as np
>>> x = CCDData([1,2,3], unit='adu')
>>> np.asarray(x)
array([1, 2, 3])
This is useful, for example, when plotting a 2D image using
matplotlib.
>>> from astropy.nddata import CCDData
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> x = CCDData([[1,2,3], [4,5,6]], unit='adu')
>>> plt.imshow(x) # doctest: +SKIP
"""
def __init__(self, *args, **kwd):
if 'meta' not in kwd:
kwd['meta'] = kwd.pop('header', None)
if 'header' in kwd:
raise ValueError("can't have both header and meta.")
super().__init__(*args, **kwd)
# Check if a unit is set. This can be temporarily disabled by the
# _arithmetic decorator (via the _config_ccd_requires_unit global).
if _config_ccd_requires_unit and self.unit is None:
raise ValueError("a unit for CCDData must be specified.")
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def wcs(self):
return self._wcs
@wcs.setter
def wcs(self, value):
self._wcs = value
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
self._unit = u.Unit(value)
@property
def header(self):
return self._meta
@header.setter
def header(self, value):
self.meta = value
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
if getattr(value, '_parent_nddata', None) is not None:
value = value.__class__(value, copy=False)
self._uncertainty = value
elif isinstance(value, np.ndarray):
if value.shape != self.shape:
raise ValueError("uncertainty must have same shape as "
"data.")
self._uncertainty = StdDevUncertainty(value)
log.info("array provided for uncertainty; assuming it is a "
"StdDevUncertainty.")
else:
raise TypeError("uncertainty must be an instance of a "
"NDUncertainty object or a numpy array.")
self._uncertainty.parent_nddata = self
else:
self._uncertainty = value
def to_hdu(self, hdu_mask='MASK', hdu_uncertainty='UNCERT',
hdu_flags=None, wcs_relax=True):
"""Creates an HDUList object from a CCDData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
            If it is a string, append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
``None`` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
            ``CTYPE`` values ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` require
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
Raises
        ------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
            - If ``self.uncertainty`` is set but has a different unit than
``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
if isinstance(self.header, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.header.copy()
else:
# Because _insert_in_metadata_fits_safe is written as a method
# we need to create a dummy CCDData instance to hold the FITS
# header we are constructing. This probably indicates that
# _insert_in_metadata_fits_safe should be rewritten in a more
# sensible way...
dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu")
for k, v in self.header.items():
dummy_ccd._insert_in_metadata_fits_safe(k, v)
header = dummy_ccd.header
if self.unit is not u.dimensionless_unscaled:
header['bunit'] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, 'shape'):
raise ValueError('only a numpy.ndarray mask can be saved.')
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
            # We need to save some kind of information about which
            # uncertainty was used so that loading the HDUList can infer the
            # uncertainty type. There is currently no general way to do
            # this, so only StdDevUncertainty is allowed.
if self.uncertainty.__class__.__name__ != 'StdDevUncertainty':
raise ValueError('only StdDevUncertainty can be saved.')
            # Assuming the uncertainty is a StdDevUncertainty, save just the
            # array. This might be problematic if the uncertainty has a unit
            # differing from the data, so abort for different units. This is
            # important for astropy > 1.2.
if (hasattr(self.uncertainty, 'unit') and
self.uncertainty.unit is not None and
self.uncertainty.unit != self.unit):
                raise ValueError('saving uncertainties with a unit differing '
                                 'from the data unit is not supported.')
hduUncert = fits.ImageHDU(self.uncertainty.array,
name=hdu_uncertainty)
hdus.append(hduUncert)
if hdu_flags and self.flags:
raise NotImplementedError('adding the flags to a HDU is not '
'supported at this time.')
hdulist = fits.HDUList(hdus)
return hdulist
def copy(self):
"""
Return a copy of the CCDData object.
"""
return self.__class__(self, copy=True)
add = _arithmetic(np.add)(NDDataArray.add)
subtract = _arithmetic(np.subtract)(NDDataArray.subtract)
multiply = _arithmetic(np.multiply)(NDDataArray.multiply)
divide = _arithmetic(np.true_divide)(NDDataArray.divide)
def _insert_in_metadata_fits_safe(self, key, value):
"""
Insert key/value pair into metadata in a way that FITS can serialize.
Parameters
----------
key : str
Key to be inserted in dictionary.
value : str or None
Value to be inserted.
Notes
-----
This addresses a shortcoming of the FITS standard. There are length
restrictions on both the ``key`` (8 characters) and ``value`` (72
characters) in the FITS standard. There is a convention for handling
long keywords and a convention for handling long values, but the
two conventions cannot be used at the same time.
This addresses that case by checking the length of the ``key`` and
``value`` and, if necessary, shortening the key.
"""
if len(key) > 8 and len(value) > 72:
short_name = key[:8]
self.meta['HIERARCH {0}'.format(key.upper())] = (
short_name, "Shortened name for {}".format(key))
self.meta[short_name] = value
else:
self.meta[key] = value
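# --- Illustrative sketch (added for exposition; not part of the module) ---
# A minimal demonstration of the FITS-safe metadata insertion above for a
# keyword whose name and value both exceed the FITS card limits. The keyword
# and value here are arbitrary demonstration data, and the private method is
# called directly only for illustration.
def _demo_fits_safe_metadata():
    ccd = CCDData([1], unit='adu', meta=fits.Header())
    long_key = 'very_long_keyword_name'  # longer than 8 characters
    long_value = 'x' * 80                # longer than 72 characters
    ccd._insert_in_metadata_fits_safe(long_key, long_value)
    # The value lands under the shortened 8-character key, while a
    # HIERARCH card records the mapping back to the full keyword name.
    return ccd.meta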
# This needs to be importable by the tests...
_KEEP_THESE_KEYWORDS_IN_HEADER = [
'JD-OBS',
'MJD-OBS',
'DATE-OBS'
]
def _generate_wcs_and_update_header(hdr):
"""
Generate a WCS object from a header and remove the WCS-specific
keywords from the header.
Parameters
----------
hdr : astropy.io.fits.header or other dict-like
Returns
-------
new_header, wcs
"""
# Try constructing a WCS object.
try:
wcs = WCS(hdr)
except Exception as exc:
# Normally WCS only raises Warnings and doesn't fail but in rare
# cases (malformed header) it could fail...
        log.info('An exception happened while extracting WCS information from '
'the Header.\n{}: {}'.format(type(exc).__name__, str(exc)))
return hdr, None
# Test for success by checking to see if the wcs ctype has a non-empty
# value, return None for wcs if ctype is empty.
if not wcs.wcs.ctype[0]:
return (hdr, None)
new_hdr = hdr.copy()
# If the keywords below are in the header they are also added to WCS.
# It seems like they should *not* be removed from the header, though.
wcs_header = wcs.to_header(relax=True)
for k in wcs_header:
if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
new_hdr.remove(k, ignore_missing=True)
return (new_hdr, wcs)
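# Illustrative sketch (added for exposition; not part of the module): build
# a minimal celestial header and run it through the helper above, splitting
# it into a cleaned header and a WCS object. All keyword values here are
# arbitrary demonstration data.
def _demo_wcs_extraction():
    hdr = fits.Header()
    hdr['CTYPE1'] = 'RA---TAN'
    hdr['CTYPE2'] = 'DEC--TAN'
    hdr['CRPIX1'] = 10.0
    hdr['CRPIX2'] = 10.0
    hdr['CRVAL1'] = 150.0
    hdr['CRVAL2'] = 2.0
    hdr['EXPTIME'] = 30.0  # a non-WCS card; it survives the cleaning
    new_hdr, wcs = _generate_wcs_and_update_header(hdr)
    # new_hdr keeps EXPTIME but loses the WCS cards, which live in wcs.
    return new_hdr, wcs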
def fits_ccddata_reader(filename, hdu=0, unit=None, hdu_uncertainty='UNCERT',
hdu_mask='MASK', hdu_flags=None, **kwd):
"""
Generate a CCDData object from a FITS file.
Parameters
----------
filename : str
Name of fits file.
hdu : int, optional
        FITS extension from which CCDData should be initialized. If zero and
        no data is present in the primary extension, the first extension
        with data is used instead; keywords from the primary header that are
        missing from the extension header are carried over to it.
Default is ``0``.
unit : `~astropy.units.Unit`, optional
        Units of the image data. If this argument is provided and the FITS
        header also specifies a unit (via the ``BUNIT`` keyword), this
        argument takes precedence.
Default is ``None``.
hdu_uncertainty : str or None, optional
FITS extension from which the uncertainty should be initialized. If the
extension does not exist the uncertainty of the CCDData is ``None``.
Default is ``'UNCERT'``.
hdu_mask : str or None, optional
FITS extension from which the mask should be initialized. If the
extension does not exist the mask of the CCDData is ``None``.
Default is ``'MASK'``.
hdu_flags : str or None, optional
Currently not implemented.
Default is ``None``.
kwd :
Any additional keyword parameters are passed through to the FITS reader
in :mod:`astropy.io.fits`; see Notes for additional discussion.
Notes
-----
    FITS files that contain scaled data (e.g. unsigned integer images) will
be scaled and the keywords used to manage scaled data in
:mod:`astropy.io.fits` are disabled.
"""
unsupport_open_keywords = {
'do_not_scale_image_data': 'Image data must be scaled.',
'scale_back': 'Scale information is not preserved.'
}
for key, msg in unsupport_open_keywords.items():
if key in kwd:
prefix = 'unsupported keyword: {0}.'.format(key)
raise TypeError(' '.join([prefix, msg]))
with fits.open(filename, **kwd) as hdus:
hdr = hdus[hdu].header
if hdu_uncertainty is not None and hdu_uncertainty in hdus:
uncertainty = StdDevUncertainty(hdus[hdu_uncertainty].data)
else:
uncertainty = None
if hdu_mask is not None and hdu_mask in hdus:
# Mask is saved as uint but we want it to be boolean.
mask = hdus[hdu_mask].data.astype(np.bool_)
else:
mask = None
if hdu_flags is not None and hdu_flags in hdus:
raise NotImplementedError('loading flags is currently not '
'supported.')
# search for the first instance with data if
# the primary header is empty.
if hdu == 0 and hdus[hdu].data is None:
for i in range(len(hdus)):
if hdus.fileinfo(i)['datSpan'] > 0:
hdu = i
comb_hdr = hdus[hdu].header.copy()
# Add header values from the primary header that aren't
# present in the extension header.
comb_hdr.extend(hdr, unique=True)
hdr = comb_hdr
log.info("first HDU with data is extension "
"{0}.".format(hdu))
break
if 'bunit' in hdr:
fits_unit_string = hdr['bunit']
# patch to handle FITS files using ADU for the unit instead of the
# standard version of 'adu'
if fits_unit_string.strip().lower() == 'adu':
fits_unit_string = fits_unit_string.lower()
else:
fits_unit_string = None
if fits_unit_string:
if unit is None:
# Convert the BUNIT header keyword to a unit and if that's not
# possible raise a meaningful error message.
try:
fits_unit_string = u.Unit(fits_unit_string)
except ValueError:
raise ValueError(
'The Header value for the key BUNIT ({}) cannot be '
                    'interpreted as a valid unit. To successfully read the '
'file as CCDData you can pass in a valid `unit` '
'argument explicitly or change the header of the FITS '
'file before reading it.'
.format(fits_unit_string))
else:
log.info("using the unit {0} passed to the FITS reader instead "
"of the unit {1} in the FITS file."
.format(unit, fits_unit_string))
use_unit = unit or fits_unit_string
hdr, wcs = _generate_wcs_and_update_header(hdr)
ccd_data = CCDData(hdus[hdu].data, meta=hdr, unit=use_unit,
mask=mask, uncertainty=uncertainty, wcs=wcs)
return ccd_data
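# Illustrative sketch (added for exposition; not part of the module). The
# file name here is hypothetical. An explicit ``unit`` argument takes
# precedence over a ``BUNIT`` card in the header; without either, the
# CCDData constructor raises a ValueError for the missing unit.
def _demo_reader_unit(filename='image.fits'):
    return fits_ccddata_reader(filename, unit='adu')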
def fits_ccddata_writer(ccd_data, filename, hdu_mask='MASK',
hdu_uncertainty='UNCERT', hdu_flags=None, **kwd):
"""
Write CCDData object to FITS file.
    Parameters
    ----------
    ccd_data : `~astropy.nddata.CCDData`
        Object to write to file.
    filename : str
        Name of file.
hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
        If it is a string, append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
``None`` for flags.
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
    ------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
        - If ``self.uncertainty`` is set but has a different unit than
``self.data``.
NotImplementedError
Saving flags is not supported.
"""
hdu = ccd_data.to_hdu(hdu_mask=hdu_mask, hdu_uncertainty=hdu_uncertainty,
hdu_flags=hdu_flags)
hdu.writeto(filename, **kwd)
with registry.delay_doc_updates(CCDData):
registry.register_reader('fits', CCDData, fits_ccddata_reader)
registry.register_writer('fits', CCDData, fits_ccddata_writer)
registry.register_identifier('fits', CCDData, fits.connect.is_fits)
try:
CCDData.read.__doc__ = fits_ccddata_reader.__doc__
except AttributeError:
CCDData.read.__func__.__doc__ = fits_ccddata_reader.__doc__
try:
CCDData.write.__doc__ = fits_ccddata_writer.__doc__
except AttributeError:
CCDData.write.__func__.__doc__ = fits_ccddata_writer.__doc__
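# Illustrative sketch (added for exposition; not part of the module): a
# write/read round trip through the unified I/O hooks registered above.
# The file name is hypothetical; ``overwrite=True`` is passed through to
# the FITS writer so the demo can be re-run.
def _demo_ccddata_roundtrip(filename='ccd_demo.fits'):
    ccd = CCDData(np.ones((4, 4)), unit='adu')
    ccd.write(filename, overwrite=True)
    return CCDData.read(filename)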
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes helper functions for array operations.
"""
from copy import deepcopy
import numpy as np
from .decorators import support_nddata
from .. import units as u
from ..coordinates import SkyCoord
from ..utils import lazyproperty
from ..wcs.utils import skycoord_to_pixel, proj_plane_pixel_scales
from ..wcs import Sip
__all__ = ['extract_array', 'add_array', 'subpixel_indices',
'overlap_slices', 'block_reduce', 'block_replicate',
'NoOverlapError', 'PartialOverlapError', 'Cutout2D']
class NoOverlapError(ValueError):
'''Raised when determining the overlap of non-overlapping arrays.'''
pass
class PartialOverlapError(ValueError):
'''Raised when arrays only partially overlap.'''
pass
def _round(a):
    '''Round to the nearest integer, with .5 always rounded up.
``np.round`` cannot be used here, because it rounds .5 to the nearest
even number.
'''
return int(np.floor(a + 0.5))
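# Illustrative sketch (added for exposition; not part of the module):
# contrast the half-up helper above with NumPy's round-half-to-even rule.
def _demo_round_behavior():
    return _round(0.5), int(np.round(0.5))  # -> (1, 0)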
def _offset(a):
    '''Offset by 0.5 for an even-sized array.
    For an array with an odd number of elements, the center is
    symmetric, e.g. for 3 elements it spans the center +/- 1 element,
    but for 4 elements it spans the center -2 / +1.
This function introduces that offset.
'''
if np.mod(a, 2) == 0:
return -0.5
else:
return 0.
def overlap_slices(large_array_shape, small_array_shape, position,
mode='partial'):
"""
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
    boundaries, where the small array is cut off appropriately.
Integer positions are at the pixel centers.
Parameters
----------
large_array_shape : tuple of int or int
The shape of the large array (for 1D arrays, this can be an
`int`).
small_array_shape : tuple of int or int
The shape of the small array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : tuple of numbers or number
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers.
For any axis where ``small_array_shape`` is even, the position
is rounded up, e.g. extracting two elements with a center of
``1`` will define the extracted region as ``[0, 1]``.
mode : {'partial', 'trim', 'strict'}, optional
In ``'partial'`` mode, a partial overlap of the small and the
large array is sufficient. The ``'trim'`` mode is similar to
the ``'partial'`` mode, but ``slices_small`` will be adjusted to
return only the overlapping elements. In the ``'strict'`` mode,
the small array has to be fully contained in the large array,
otherwise an `~astropy.nddata.utils.PartialOverlapError` is
raised. In all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`.
Returns
-------
slices_large : tuple of slices
A tuple of slice objects for each axis of the large array, such
that ``large_array[slices_large]`` extracts the region of the
large array that overlaps with the small array.
slices_small : tuple of slices
A tuple of slice objects for each axis of the small array, such
that ``small_array[slices_small]`` extracts the region that is
inside the large array.
"""
if mode not in ['partial', 'trim', 'strict']:
raise ValueError('Mode can be only "partial", "trim", or "strict".')
if np.isscalar(small_array_shape):
small_array_shape = (small_array_shape, )
if np.isscalar(large_array_shape):
large_array_shape = (large_array_shape, )
if np.isscalar(position):
position = (position, )
if len(small_array_shape) != len(large_array_shape):
raise ValueError('"large_array_shape" and "small_array_shape" must '
'have the same number of dimensions.')
if len(small_array_shape) != len(position):
raise ValueError('"position" must have the same number of dimensions '
'as "small_array_shape".')
# Get edge coordinates
edges_min = [_round(pos + 0.5 - small_shape / 2. + _offset(small_shape))
for (pos, small_shape) in zip(position, small_array_shape)]
edges_max = [_round(pos + 0.5 + small_shape / 2. + _offset(small_shape))
for (pos, small_shape) in zip(position, small_array_shape)]
for e_max in edges_max:
if e_max <= 0:
raise NoOverlapError('Arrays do not overlap.')
for e_min, large_shape in zip(edges_min, large_array_shape):
if e_min >= large_shape:
raise NoOverlapError('Arrays do not overlap.')
if mode == 'strict':
for e_min in edges_min:
if e_min < 0:
raise PartialOverlapError('Arrays overlap only partially.')
for e_max, large_shape in zip(edges_max, large_array_shape):
if e_max >= large_shape:
raise PartialOverlapError('Arrays overlap only partially.')
# Set up slices
slices_large = tuple(slice(max(0, edge_min), min(large_shape, edge_max))
for (edge_min, edge_max, large_shape) in
zip(edges_min, edges_max, large_array_shape))
if mode == 'trim':
slices_small = tuple(slice(0, slc.stop - slc.start)
for slc in slices_large)
else:
slices_small = tuple(slice(max(0, -edge_min),
min(large_shape - edge_min,
edge_max - edge_min))
for (edge_min, edge_max, large_shape) in
zip(edges_min, edges_max, large_array_shape))
return slices_large, slices_small
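# Illustrative sketch (added for exposition; not part of the module): a
# length-3 window centered at index 0 of a length-5 array hangs off the
# left edge, so in the default 'partial' mode only its last two elements
# overlap the large array.
def _demo_overlap_slices():
    slices_large, slices_small = overlap_slices(5, 3, 0)
    # slices_large == (slice(0, 2),); slices_small == (slice(1, 3),)
    return slices_large, slices_small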
def extract_array(array_large, shape, position, mode='partial',
fill_value=np.nan, return_position=False):
"""
Extract a smaller array of the given shape and position from a
larger array.
Parameters
----------
array_large : `~numpy.ndarray`
The array from which to extract the small array.
shape : tuple or int
The shape of the extracted array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : tuple of numbers or number
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers
(for 1D arrays, this can be a number).
mode : {'partial', 'trim', 'strict'}, optional
The mode used for extracting the small array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
small array and the large array is sufficient. For the
``'strict'`` mode, the small array has to be fully contained
within the large array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In all
modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'`` mode,
positions in the small array that do not overlap with the large
array will be filled with ``fill_value``. In ``'trim'`` mode
only the overlapping elements are returned, thus the resulting
small array may be smaller than the requested ``shape``.
fill_value : number, optional
If ``mode='partial'``, the value to fill pixels in the extracted
small array that do not overlap with the input ``array_large``.
``fill_value`` must have the same ``dtype`` as the
``array_large`` array.
return_position : boolean, optional
If `True`, return the coordinates of ``position`` in the
coordinate system of the returned array.
Returns
-------
array_small : `~numpy.ndarray`
The extracted array.
new_position : tuple
If ``return_position`` is true, this tuple will contain the
coordinates of the input ``position`` in the coordinate system
of ``array_small``. Note that for partially overlapping arrays,
``new_position`` might actually be outside of the
``array_small``; ``array_small[new_position]`` might give wrong
results if any element in ``new_position`` is negative.
Examples
--------
We consider a large array with the shape 11x10, from which we extract
a small array of shape 3x5:
>>> import numpy as np
>>> from astropy.nddata.utils import extract_array
>>> large_array = np.arange(110).reshape((11, 10))
>>> extract_array(large_array, (3, 5), (7, 7))
array([[65, 66, 67, 68, 69],
[75, 76, 77, 78, 79],
[85, 86, 87, 88, 89]])
"""
if np.isscalar(shape):
shape = (shape, )
if np.isscalar(position):
position = (position, )
if mode not in ['partial', 'trim', 'strict']:
raise ValueError("Valid modes are 'partial', 'trim', and 'strict'.")
large_slices, small_slices = overlap_slices(array_large.shape,
shape, position, mode=mode)
extracted_array = array_large[large_slices]
if return_position:
new_position = [i - s.start for i, s in zip(position, large_slices)]
    # Extracting on the edges is presumably a rare case, so treat it specially here
if (extracted_array.shape != shape) and (mode == 'partial'):
extracted_array = np.zeros(shape, dtype=array_large.dtype)
extracted_array[:] = fill_value
extracted_array[small_slices] = array_large[large_slices]
if return_position:
new_position = [i + s.start for i, s in zip(new_position,
small_slices)]
if return_position:
return extracted_array, tuple(new_position)
else:
return extracted_array
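# Illustrative sketch (added for exposition; not part of the module): in
# 'partial' mode a window that extends past the array edge is padded with
# ``fill_value``. The array and fill value are arbitrary demonstration data.
def _demo_extract_partial():
    large = np.arange(110).reshape((11, 10))
    return extract_array(large, (3, 5), (0, 0), mode='partial',
                         fill_value=-1)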
def add_array(array_large, array_small, position):
"""
Add a smaller array at a given position in a larger array.
Parameters
----------
array_large : `~numpy.ndarray`
Large array.
array_small : `~numpy.ndarray`
Small array to add.
position : tuple
Position of the small array's center, with respect to the large array.
Coordinates should be in the same order as the array shape.
Returns
-------
new_array : `~numpy.ndarray`
The new array formed from the sum of ``array_large`` and
``array_small``.
Notes
-----
The addition is done in-place.
Examples
--------
We consider a large array of zeros with the shape 5x5 and a small
array of ones with a shape of 3x3:
>>> import numpy as np
>>> from astropy.nddata.utils import add_array
>>> large_array = np.zeros((5, 5))
>>> small_array = np.ones((3, 3))
>>> add_array(large_array, small_array, (1, 2)) # doctest: +FLOAT_CMP
array([[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
"""
# Check if large array is really larger
if all(large_shape > small_shape for (large_shape, small_shape)
in zip(array_large.shape, array_small.shape)):
large_slices, small_slices = overlap_slices(array_large.shape,
array_small.shape, position)
array_large[large_slices] += array_small[small_slices]
return array_large
else:
raise ValueError("Can't add array. Small array too large.")
def subpixel_indices(position, subsampling):
"""
Convert decimal points to indices, given a subsampling factor.
    This discards the integer part of the position and uses only the
    fractional part, converting it to a subpixel position depending on the
subsampling specified. The center of a pixel corresponds to an integer
position.
Parameters
----------
position : `~numpy.ndarray` or array-like
Positions in pixels.
subsampling : int
Subsampling factor per pixel.
Returns
-------
indices : `~numpy.ndarray`
The integer subpixel indices corresponding to the input positions.
Examples
--------
If no subsampling is used, then the subpixel indices returned are always 0:
>>> from astropy.nddata.utils import subpixel_indices
>>> subpixel_indices([1.2, 3.4, 5.6], 1) # doctest: +FLOAT_CMP
array([0., 0., 0.])
    If instead we use a subsampling of 2, we see that for the first two
    values (1.2 and 3.4) the subpixel index is 1, while for 5.5 it is 0.
    This is because 1.2 and 3.4 lie in the right halves of pixels 1 and 3
    (each pixel spans its center +/- 0.5), while 5.5 falls at the left
    edge of pixel 6.
>>> subpixel_indices([1.2, 3.4, 5.5], 2) # doctest: +FLOAT_CMP
array([1., 1., 0.])
"""
# Get decimal points
fractions = np.modf(np.asanyarray(position) + 0.5)[0]
return np.floor(fractions * subsampling)
@support_nddata
def block_reduce(data, block_size, func=np.sum):
"""
Downsample a data array by applying a function to local blocks.
If ``data`` is not perfectly divisible by ``block_size`` along a
given axis then the data will be trimmed (from the end) along that
axis.
Parameters
----------
data : array_like
The data to be resampled.
block_size : int or array_like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
        ``block_size`` will be used for every axis.
func : callable, optional
The method to use to downsample the data. Must be a callable
that takes in a `~numpy.ndarray` along with an ``axis`` keyword,
which defines the axis along which the function is applied. The
default is `~numpy.sum`, which provides block summation (and
conserves the data sum).
Returns
-------
output : array-like
The resampled data.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata.utils import block_reduce
>>> data = np.arange(16).reshape(4, 4)
>>> block_reduce(data, 2) # doctest: +SKIP
array([[10, 18],
[42, 50]])
>>> block_reduce(data, 2, func=np.mean) # doctest: +SKIP
array([[ 2.5, 4.5],
[ 10.5, 12.5]])
"""
from skimage.measure import block_reduce
data = np.asanyarray(data)
block_size = np.atleast_1d(block_size)
if data.ndim > 1 and len(block_size) == 1:
block_size = np.repeat(block_size, data.ndim)
if len(block_size) != data.ndim:
raise ValueError('`block_size` must be a scalar or have the same '
'length as `data.shape`')
block_size = np.array([int(i) for i in block_size])
size_resampled = np.array(data.shape) // block_size
size_init = size_resampled * block_size
# trim data if necessary
for i in range(data.ndim):
if data.shape[i] != size_init[i]:
data = data.swapaxes(0, i)
data = data[:size_init[i]]
data = data.swapaxes(0, i)
return block_reduce(data, tuple(block_size), func=func)
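# Illustrative sketch (added for exposition; not part of the module): the
# function above defers to scikit-image, but for a 2D array whose shape
# divides evenly by a scalar block size, the same result can be obtained
# with a plain NumPy reshape. This is an alternative sketch, not the
# implementation used above.
def _block_reduce_numpy_sketch(data, block_size, func=np.sum):
    ny, nx = data.shape
    reshaped = data.reshape(ny // block_size, block_size,
                            nx // block_size, block_size)
    # Reduce within each block: first along the inner x axis, then y.
    return func(func(reshaped, axis=3), axis=1)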
@support_nddata
def block_replicate(data, block_size, conserve_sum=True):
"""
Upsample a data array by block replication.
Parameters
----------
data : array_like
The data to be block replicated.
block_size : int or array_like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
        ``block_size`` will be used for every axis.
conserve_sum : bool, optional
If `True` (the default) then the sum of the output
block-replicated data will equal the sum of the input ``data``.
Returns
-------
output : array_like
The block-replicated data.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata.utils import block_replicate
>>> data = np.array([[0., 1.], [2., 3.]])
>>> block_replicate(data, 2) # doctest: +FLOAT_CMP
array([[0. , 0. , 0.25, 0.25],
[0. , 0. , 0.25, 0.25],
[0.5 , 0.5 , 0.75, 0.75],
[0.5 , 0.5 , 0.75, 0.75]])
>>> block_replicate(data, 2, conserve_sum=False) # doctest: +FLOAT_CMP
array([[0., 0., 1., 1.],
[0., 0., 1., 1.],
[2., 2., 3., 3.],
[2., 2., 3., 3.]])
"""
data = np.asanyarray(data)
block_size = np.atleast_1d(block_size)
if data.ndim > 1 and len(block_size) == 1:
block_size = np.repeat(block_size, data.ndim)
if len(block_size) != data.ndim:
raise ValueError('`block_size` must be a scalar or have the same '
'length as `data.shape`')
for i in range(data.ndim):
data = np.repeat(data, block_size[i], axis=i)
if conserve_sum:
data = data / float(np.prod(block_size))
return data
class Cutout2D:
"""
Create a cutout object from a 2D array.
The returned object will contain a 2D cutout array. If
``copy=False`` (default), the cutout array is a view into the
original ``data`` array, otherwise the cutout array will contain a
copy of the original data.
If a `~astropy.wcs.WCS` object is input, then the returned object
will also contain a copy of the original WCS, but updated for the
cutout array.
For example usage, see :ref:`cutout_images`.
.. warning::
The cutout WCS object does not currently handle cases where the
input WCS object contains distortion lookup tables described in
the `FITS WCS distortion paper
<http://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
Parameters
----------
data : `~numpy.ndarray`
The 2D data array from which to extract the cutout array.
position : tuple or `~astropy.coordinates.SkyCoord`
The position of the cutout array's center with respect to
the ``data`` array. The position can be specified either as
a ``(x, y)`` tuple of pixel coordinates or a
`~astropy.coordinates.SkyCoord`, in which case ``wcs`` is a
required input.
size : int, array-like, `~astropy.units.Quantity`
The size of the cutout array along each axis. If ``size``
is a scalar number or a scalar `~astropy.units.Quantity`,
then a square cutout of ``size`` will be created. If
``size`` has two elements, they should be in ``(ny, nx)``
order. Scalar numbers in ``size`` are assumed to be in
units of pixels. ``size`` can also be a
`~astropy.units.Quantity` object or contain
`~astropy.units.Quantity` objects. Such
`~astropy.units.Quantity` objects must be in pixel or
angular units. For all cases, ``size`` will be converted to
        an integer number of pixels, rounding to the nearest
integer. See the ``mode`` keyword for additional details on
the final cutout size.
.. note::
If ``size`` is in angular units, the cutout size is
converted to pixels using the pixel scales along each
axis of the image at the ``CRPIX`` location. Projection
and other non-linear distortions are not taken into
account.
wcs : `~astropy.wcs.WCS`, optional
A WCS object associated with the input ``data`` array. If
``wcs`` is not `None`, then the returned cutout object will
contain a copy of the updated WCS for the cutout data array.
mode : {'trim', 'partial', 'strict'}, optional
The mode used for creating the cutout data array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
cutout array and the input ``data`` array is sufficient.
For the ``'strict'`` mode, the cutout array has to be fully
contained within the ``data`` array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In
all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'``
mode, positions in the cutout array that do not overlap with
the ``data`` array will be filled with ``fill_value``. In
``'trim'`` mode only the overlapping elements are returned,
thus the resulting cutout array may be smaller than the
requested ``shape``.
fill_value : number, optional
If ``mode='partial'``, the value to fill pixels in the
cutout array that do not overlap with the input ``data``.
``fill_value`` must have the same ``dtype`` as the input
``data`` array.
copy : bool, optional
If `False` (default), then the cutout data will be a view
into the original ``data`` array. If `True`, then the
cutout data will hold a copy of the original ``data`` array.
Attributes
----------
data : 2D `~numpy.ndarray`
The 2D cutout array.
shape : 2 tuple
The ``(ny, nx)`` shape of the cutout array.
shape_input : 2 tuple
The ``(ny, nx)`` shape of the input (original) array.
input_position_cutout : 2 tuple
The (unrounded) ``(x, y)`` position with respect to the cutout
array.
input_position_original : 2 tuple
The original (unrounded) ``(x, y)`` input position (with respect
to the original array).
slices_original : 2 tuple of slice objects
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the original array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
slices_cutout : 2 tuple of slice objects
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the cutout array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
xmin_original, ymin_original, xmax_original, ymax_original : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values. These
values are the same as those in `bbox_original`.
xmin_cutout, ymin_cutout, xmax_cutout, ymax_cutout : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values. These values are
the same as those in `bbox_cutout`.
wcs : `~astropy.wcs.WCS` or `None`
A WCS object associated with the cutout array if a ``wcs``
was input.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata.utils import Cutout2D
>>> from astropy import units as u
>>> data = np.arange(20.).reshape(5, 4)
>>> cutout1 = Cutout2D(data, (2, 2), (3, 3))
>>> print(cutout1.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> print(cutout1.center_original)
(2.0, 2.0)
>>> print(cutout1.center_cutout)
(1.0, 1.0)
>>> print(cutout1.origin_original)
(1, 1)
>>> cutout2 = Cutout2D(data, (2, 2), 3)
>>> print(cutout2.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> size = u.Quantity([3, 3], u.pixel)
>>> cutout3 = Cutout2D(data, (0, 0), size)
>>> print(cutout3.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout4 = Cutout2D(data, (0, 0), (3 * u.pixel, 3))
>>> print(cutout4.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout5 = Cutout2D(data, (0, 0), (3, 3), mode='partial')
>>> print(cutout5.data) # doctest: +FLOAT_CMP
[[nan nan nan]
[nan 0. 1.]
[nan 4. 5.]]
"""
def __init__(self, data, position, size, wcs=None, mode='trim',
fill_value=np.nan, copy=False):
if isinstance(position, SkyCoord):
if wcs is None:
raise ValueError('wcs must be input if position is a '
'SkyCoord')
position = skycoord_to_pixel(position, wcs, mode='all') # (x, y)
if np.isscalar(size):
size = np.repeat(size, 2)
# special handling for a scalar Quantity
if isinstance(size, u.Quantity):
size = np.atleast_1d(size)
if len(size) == 1:
size = np.repeat(size, 2)
if len(size) > 2:
raise ValueError('size must have at most two elements')
shape = np.zeros(2).astype(int)
pixel_scales = None
# ``size`` can have a mixture of int and Quantity (and even units),
# so evaluate each axis separately
for axis, side in enumerate(size):
if not isinstance(side, u.Quantity):
shape[axis] = int(np.round(size[axis])) # pixels
else:
if side.unit == u.pixel:
shape[axis] = int(np.round(side.value))
elif side.unit.physical_type == 'angle':
if wcs is None:
raise ValueError('wcs must be input if any element '
'of size has angular units')
if pixel_scales is None:
pixel_scales = u.Quantity(
proj_plane_pixel_scales(wcs), wcs.wcs.cunit[axis])
shape[axis] = int(np.round(
(side / pixel_scales[axis]).decompose()))
else:
raise ValueError('shape can contain Quantities with only '
'pixel or angular units')
data = np.asanyarray(data)
# reverse position because extract_array and overlap_slices
# use (y, x), but keep the input position
pos_yx = position[::-1]
cutout_data, input_position_cutout = extract_array(
data, tuple(shape), pos_yx, mode=mode, fill_value=fill_value,
return_position=True)
if copy:
cutout_data = np.copy(cutout_data)
self.data = cutout_data
self.input_position_cutout = input_position_cutout[::-1] # (x, y)
slices_original, slices_cutout = overlap_slices(
data.shape, shape, pos_yx, mode=mode)
self.slices_original = slices_original
self.slices_cutout = slices_cutout
self.shape = self.data.shape
self.input_position_original = position
self.shape_input = shape
((self.ymin_original, self.ymax_original),
(self.xmin_original, self.xmax_original)) = self.bbox_original
((self.ymin_cutout, self.ymax_cutout),
(self.xmin_cutout, self.xmax_cutout)) = self.bbox_cutout
# the true origin pixel of the cutout array, including any
# filled cutout values
self._origin_original_true = (
self.origin_original[0] - self.slices_cutout[1].start,
self.origin_original[1] - self.slices_cutout[0].start)
if wcs is not None:
self.wcs = deepcopy(wcs)
self.wcs.wcs.crpix -= self._origin_original_true
self.wcs._naxis = [self.data.shape[1], self.data.shape[0]]
if wcs.sip is not None:
self.wcs.sip = Sip(wcs.sip.a, wcs.sip.b,
wcs.sip.ap, wcs.sip.bp,
wcs.sip.crpix - self._origin_original_true)
else:
self.wcs = None
def to_original_position(self, cutout_position):
"""
Convert an ``(x, y)`` position in the cutout array to the original
``(x, y)`` position in the original large array.
Parameters
----------
cutout_position : tuple
The ``(x, y)`` pixel position in the cutout array.
Returns
-------
original_position : tuple
The corresponding ``(x, y)`` pixel position in the original
large array.
"""
return tuple(cutout_position[i] + self.origin_original[i]
for i in [0, 1])
def to_cutout_position(self, original_position):
"""
Convert an ``(x, y)`` position in the original large array to
the ``(x, y)`` position in the cutout array.
Parameters
----------
original_position : tuple
The ``(x, y)`` pixel position in the original large array.
Returns
-------
cutout_position : tuple
The corresponding ``(x, y)`` pixel position in the cutout
array.
"""
return tuple(original_position[i] - self.origin_original[i]
for i in [0, 1])
def plot_on_original(self, ax=None, fill=False, **kwargs):
"""
Plot the cutout region on a matplotlib Axes instance.
Parameters
----------
ax : `matplotlib.axes.Axes` instance, optional
If `None`, then the current `matplotlib.axes.Axes` instance
is used.
fill : bool, optional
Set whether to fill the cutout patch. The default is
`False`.
kwargs : optional
Any keyword arguments accepted by `matplotlib.patches.Patch`.
Returns
-------
ax : `matplotlib.axes.Axes` instance
The matplotlib Axes instance constructed in the method if
``ax=None``. Otherwise the output ``ax`` is the same as the
input ``ax``.
"""
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
kwargs['fill'] = fill
if ax is None:
ax = plt.gca()
height, width = self.shape
hw, hh = width / 2., height / 2.
pos_xy = self.position_original - np.array([hw, hh])
patch = mpatches.Rectangle(pos_xy, width, height, 0., **kwargs)
ax.add_patch(patch)
return ax
@staticmethod
def _calc_center(slices):
"""
Calculate the center position. The center position will be
fractional for even-sized arrays. For ``mode='partial'``, the
central position is calculated for the valid (non-filled) cutout
values.
"""
return tuple(0.5 * (slices[i].start + slices[i].stop - 1)
for i in [1, 0])
@staticmethod
def _calc_bbox(slices):
"""
Calculate a minimal bounding box in the form ``((ymin, ymax),
(xmin, xmax))``. Note these are pixel locations, not slice
indices. For ``mode='partial'``, the bounding box indices are
for the valid (non-filled) cutout values.
"""
# (stop - 1) to return the max pixel location, not the slice index
return ((slices[0].start, slices[0].stop - 1),
(slices[1].start, slices[1].stop - 1))
@lazyproperty
def origin_original(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the original array. For ``mode='partial'``, the
origin pixel is calculated for the valid (non-filled) cutout
values.
"""
return (self.slices_original[1].start, self.slices_original[0].start)
@lazyproperty
def origin_cutout(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the cutout array. For ``mode='partial'``, the origin
pixel is calculated for the valid (non-filled) cutout values.
"""
return (self.slices_cutout[1].start, self.slices_cutout[0].start)
@lazyproperty
def position_original(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the original array.
"""
return (_round(self.input_position_original[0]),
_round(self.input_position_original[1]))
@lazyproperty
def position_cutout(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the cutout array.
"""
return (_round(self.input_position_cutout[0]),
_round(self.input_position_cutout[1]))
@lazyproperty
def center_original(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the original array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_original)
@lazyproperty
def center_cutout(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the cutout array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_cutout)
@lazyproperty
def bbox_original(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_original)
@lazyproperty
def bbox_cutout(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_cutout)
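# Illustrative sketch (added for exposition; not part of the module): the
# position round trip on the 5x4 example array from the docstring above.
def _demo_cutout_positions():
    data = np.arange(20.).reshape(5, 4)
    cutout = Cutout2D(data, (2, 2), (3, 3))
    orig = cutout.to_original_position((0, 0))  # -> (1, 1)
    back = cutout.to_cutout_position(orig)      # -> (0, 0)
    return orig, back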