repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
pratapvardhan/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10-fold cross-validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
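For instance (illustrative only), you might swap in
``DecisionTreeClassifier(max_depth=3)`` or
``AdaBoostClassifier(DecisionTreeClassifier(max_depth=None), n_estimators=50)``,
and pass ``n_jobs=-1`` to the forest classifiers so their trees are grown on all
available cores.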
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different from what we see in the decision
# surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points; these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
siosio/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_vars.py | 2 | 24905 | """ pydevd_vars deals with variables:
resolution/conversion to XML.
"""
import math
import pickle
from _pydev_imps._pydev_saved_modules import thread
from _pydev_bundle.pydev_imports import quote
from _pydevd_bundle.pydevd_constants import get_frame, get_current_thread_id, xrange, NUMPY_NUMERIC_TYPES
from _pydevd_bundle.pydevd_custom_frames import get_custom_frame
from _pydevd_bundle.pydevd_xml import ExceptionOnEvaluate, get_type, var_to_xml
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys # @Reimport
try:
from collections import OrderedDict
except:
OrderedDict = dict
from _pydev_imps._pydev_saved_modules import threading
import traceback
from _pydevd_bundle import pydevd_save_locals
from _pydev_bundle.pydev_imports import Exec, execfile
from _pydevd_bundle.pydevd_utils import to_string, VariableWithOffset
SENTINEL_VALUE = []
# ------------------------------------------------------------------------------------------------------ class for errors
class VariableError(RuntimeError): pass
class FrameNotFoundError(RuntimeError): pass
def _iter_frames(initialFrame):
'''NO-YIELD VERSION: Iterates through all the frames starting at the specified frame (which will be the first returned item)'''
# cannot use yield
frames = []
while initialFrame is not None:
frames.append(initialFrame)
initialFrame = initialFrame.f_back
return frames
def dump_frames(thread_id):
sys.stdout.write('dumping frames\n')
if thread_id != get_current_thread_id(threading.currentThread()):
raise VariableError("find_frame: must execute on same thread")
curFrame = get_frame()
for frame in _iter_frames(curFrame):
sys.stdout.write('%s\n' % pickle.dumps(frame))
# ===============================================================================
# AdditionalFramesContainer
# ===============================================================================
class AdditionalFramesContainer:
lock = thread.allocate_lock()
additional_frames = {} # dict of dicts
def add_additional_frame_by_id(thread_id, frames_by_id):
AdditionalFramesContainer.additional_frames[thread_id] = frames_by_id
addAdditionalFrameById = add_additional_frame_by_id # Backward compatibility
def remove_additional_frame_by_id(thread_id):
del AdditionalFramesContainer.additional_frames[thread_id]
removeAdditionalFrameById = remove_additional_frame_by_id # Backward compatibility
def has_additional_frames_by_id(thread_id):
return thread_id in AdditionalFramesContainer.additional_frames
def get_additional_frames_by_id(thread_id):
return AdditionalFramesContainer.additional_frames.get(thread_id)
def find_frame(thread_id, frame_id):
""" returns a frame on the thread that has a given frame_id """
try:
curr_thread_id = get_current_thread_id(threading.currentThread())
if thread_id != curr_thread_id:
try:
return get_custom_frame(thread_id, frame_id) # I.e.: thread_id could be a stackless frame id + thread_id.
except:
pass
raise VariableError("find_frame: must execute on same thread (%s != %s)" % (thread_id, curr_thread_id))
lookingFor = int(frame_id)
if AdditionalFramesContainer.additional_frames:
if thread_id in AdditionalFramesContainer.additional_frames:
frame = AdditionalFramesContainer.additional_frames[thread_id].get(lookingFor)
if frame is not None:
return frame
curFrame = get_frame()
if frame_id == "*":
return curFrame # any frame is specified with "*"
frameFound = None
for frame in _iter_frames(curFrame):
if lookingFor == id(frame):
frameFound = frame
del frame
break
del frame
# Important: python can hold a reference to the frame from the current context
# if an exception is raised, so, if we don't explicitly add those deletes
# we might have those variables living much longer than we'd want to.
# I.e.: sys.exc_info holding reference to frame that raises exception (so, other places
# need to call sys.exc_clear())
del curFrame
if frameFound is None:
msgFrames = ''
i = 0
for frame in _iter_frames(get_frame()):
i += 1
msgFrames += str(id(frame))
if i % 5 == 0:
msgFrames += '\n'
else:
msgFrames += ' - '
# Note: commented this error message out (it may commonly happen
# if a message asking for a frame is issued while a thread is paused
# but the thread starts running before the message is actually
# handled).
# Leaving code to uncomment during tests.
# err_msg = '''find_frame: frame not found.
# Looking for thread_id:%s, frame_id:%s
# Current thread_id:%s, available frames:
# %s\n
# ''' % (thread_id, lookingFor, curr_thread_id, msgFrames)
#
# sys.stderr.write(err_msg)
return None
return frameFound
except:
import traceback
traceback.print_exc()
return None
def getVariable(thread_id, frame_id, scope, attrs):
"""
returns the value of a variable
:scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
BY_ID means we'll traverse the list of all objects alive to get the object.
:attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2).
:note: when BY_ID is used, the frame_id is considered the id of the object to find and
not the frame (as we don't care about the frame in this case).
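:example: (illustrative) scope='FRAME' with attrs='my_obj\tvalue' first looks up
'my_obj' in the frame's locals/globals and then resolves the 'value' attribute on it.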
"""
if scope == 'BY_ID':
if thread_id != get_current_thread_id(threading.currentThread()):
raise VariableError("getVariable: must execute on same thread")
try:
import gc
objects = gc.get_objects()
except:
pass # Not all python variants have it.
else:
frame_id = int(frame_id)
for var in objects:
if id(var) == frame_id:
if attrs is not None:
attrList = attrs.split('\t')
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
# If it didn't return previously, we couldn't find it by id (i.e.: already garbage collected).
sys.stderr.write('Unable to find object with id: %s\n' % (frame_id,))
return None
frame = find_frame(thread_id, frame_id)
if frame is None:
return {}
if attrs is not None:
attrList = attrs.split('\t')
else:
attrList = []
# Substitute the escaped tab marker back into each attribute name
attrList = [attr.replace("@_@TAB_CHAR@_@", '\t') for attr in attrList]
if scope == 'EXPRESSION':
for count in xrange(len(attrList)):
if count == 0:
# An Expression can be in any scope (globals/locals), therefore it needs to be evaluated as an expression
var = evaluate_expression(thread_id, frame_id, attrList[count], False)
else:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, attrList[count])
else:
if scope == "GLOBAL":
var = frame.f_globals
del attrList[0] # globals are special, and they get a single dummy unused attribute
else:
# in a frame access both locals and globals as Python does
var = {}
var.update(frame.f_globals)
var.update(frame.f_locals)
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
def get_offset(attrs):
"""
Extract offset from the given attributes.
:param attrs: The string of a compound variable fields split by tabs.
If an offset is given, it must be the first element.
:return: The value of offset if given or 0.
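For example (illustrative values): '300\tobj\tattr1' yields 300, whereas
'obj\tattr1' yields 0 because int('obj') raises a ValueError.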
"""
offset = 0
if attrs is not None:
try:
offset = int(attrs.split('\t')[0])
except ValueError:
pass
return offset
def resolve_compound_variable_fields(thread_id, frame_id, scope, attrs):
"""
Resolve compound variable in debugger scopes by its name and attributes
:param thread_id: id of the variable's thread
:param frame_id: id of the variable's frame
:param scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
:param attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:return: a dictionary of the variable's fields
:note: PyCharm supports progressive loading of large collections and uses the `attrs`
parameter to pass the offset, e.g. 300\t\\obj\tattr1\tattr2 should return
the value of attr2 starting from the 300th element. This hack makes it possible
to add support for progressive loading without extending the protocol.
"""
offset = get_offset(attrs)
orig_attrs, attrs = attrs, attrs.split('\t', 1)[1] if offset else attrs
var = getVariable(thread_id, frame_id, scope, attrs)
try:
_type, _typeName, resolver = get_type(var)
return _typeName, resolver.get_dictionary(VariableWithOffset(var, offset) if offset else var)
except:
sys.stderr.write('Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s\n' % (
thread_id, frame_id, scope, orig_attrs,))
traceback.print_exc()
def resolve_var_object(var, attrs):
"""
Resolve variable's attribute
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a value of resolved variable's attribute
"""
if attrs is not None:
attr_list = attrs.split('\t')
else:
attr_list = []
for k in attr_list:
type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
def resolve_compound_var_object_fields(var, attrs):
"""
Resolve compound variable by its object and attributes
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a dictionary of the variable's fields
"""
offset = get_offset(attrs)
attrs = attrs.split('\t', 1)[1] if offset else attrs
attr_list = attrs.split('\t')
for k in attr_list:
type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
try:
type, _typeName, resolver = get_type(var)
return resolver.get_dictionary(VariableWithOffset(var, offset) if offset else var)
except:
traceback.print_exc()
def custom_operation(thread_id, frame_id, scope, attrs, style, code_or_file, operation_fn_name):
"""
We'll execute the code_or_file and then look up operation_fn_name in the resulting namespace, calling it with the given var.
code_or_file: either some code (i.e.: from pprint import pprint) or a file to be executed.
operation_fn_name: the name of the operation to execute after the exec (i.e.: pprint)
"""
expressionValue = getVariable(thread_id, frame_id, scope, attrs)
try:
namespace = {'__name__': '<custom_operation>'}
if style == "EXECFILE":
namespace['__file__'] = code_or_file
execfile(code_or_file, namespace, namespace)
else: # style == EXEC
namespace['__file__'] = '<customOperationCode>'
Exec(code_or_file, namespace, namespace)
return str(namespace[operation_fn_name](expressionValue))
except:
traceback.print_exc()
def eval_in_context(expression, globals, locals):
result = None
try:
result = eval(expression, globals, locals)
except Exception:
s = StringIO()
traceback.print_exc(file=s)
result = s.getvalue()
try:
try:
etype, value, tb = sys.exc_info()
result = value
finally:
etype = value = tb = None
except:
pass
result = ExceptionOnEvaluate(result)
# Ok, we have the initial error message, but let's see if we're dealing with a name mangling error...
try:
if '__' in expression:
# Try to handle '__' name mangling...
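# (e.g. evaluating 'obj.__secret' retries the lookup as 'obj._SomeClass__secret',
# where SomeClass is obj's class, when the plain attribute is missing)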
split = expression.split('.')
curr = locals.get(split[0])
for entry in split[1:]:
if entry.startswith('__') and not hasattr(curr, entry):
entry = '_%s%s' % (curr.__class__.__name__, entry)
curr = getattr(curr, entry)
result = curr
except:
pass
return result
def evaluate_expression(thread_id, frame_id, expression, doExec):
'''returns the result of the evaluated expression
@param doExec: determines if we should do an exec or an eval
'''
frame = find_frame(thread_id, frame_id)
if frame is None:
return
# Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
# (Names not resolved in generator expression in method)
# See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals
try:
expression = str(expression.replace('@LINE@', '\n'))
if doExec:
try:
# try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and
# it will have whatever the user actually did)
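# (e.g. '1 + 1' compiles in 'eval' mode and its result is printed below, whereas a
# statement such as 'x = 1 + 1' fails to compile as an eval and falls back to Exec)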
compiled = compile(expression, '<string>', 'eval')
except:
Exec(expression, updated_globals, frame.f_locals)
pydevd_save_locals.save_locals(frame)
else:
result = eval(compiled, updated_globals, frame.f_locals)
if result is not None: # Only print if it's not None (as python does)
sys.stdout.write('%s\n' % (result,))
return
else:
return eval_in_context(expression, updated_globals, frame.f_locals)
finally:
# Should not be kept alive if an exception happens and this frame is kept in the stack.
del updated_globals
del frame
def change_attr_expression(thread_id, frame_id, attr, expression, dbg, value=SENTINEL_VALUE):
'''Changes some attribute in a given frame.
'''
frame = find_frame(thread_id, frame_id)
if frame is None:
return
try:
expression = expression.replace('@LINE@', '\n')
if dbg.plugin and value is SENTINEL_VALUE:
result = dbg.plugin.change_variable(frame, attr, expression)
if result:
return result
if value is SENTINEL_VALUE:
# It is possible to have variables with names like '.0', ',,,foo', etc in scope by setting them with
# `sys._getframe().f_locals`. In particular, the '.0' variable name is used to denote the list iterator when we stop in
# list comprehension expressions. This variable evaluates to 0. by `eval`, which is not what we want and this is the main
# reason we have to check if the expression exists in the global and local scopes before trying to evaluate it.
value = frame.f_locals.get(expression) or frame.f_globals.get(expression) or eval(expression, frame.f_globals, frame.f_locals)
if attr[:7] == "Globals":
attr = attr[8:]
if attr in frame.f_globals:
frame.f_globals[attr] = value
return frame.f_globals[attr]
else:
if pydevd_save_locals.is_save_locals_available():
frame.f_locals[attr] = value
pydevd_save_locals.save_locals(frame)
return frame.f_locals[attr]
# default way (only works for changing it in the topmost frame)
result = value
Exec('%s=%s' % (attr, expression), frame.f_globals, frame.f_locals)
return result
except Exception:
traceback.print_exc()
MAXIMUM_ARRAY_SIZE = 100
def array_to_xml(array, name, roffset, coffset, rows, cols, format):
array, xml, r, c, f = array_to_meta_xml(array, name, format)
format = '%' + f
if rows == -1 and cols == -1:
rows = r
cols = c
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE)
# there is no obvious rule for slicing (at least 5 choices)
if len(array) == 1 and (rows > 1 or cols > 1):
array = array[0]
if array.size > len(array):
array = array[roffset:, coffset:]
rows = min(rows, len(array))
cols = min(cols, len(array[0]))
if len(array) == 1:
array = array[0]
elif array.size == len(array):
if roffset == 0 and rows == 1:
array = array[coffset:]
cols = min(cols, len(array))
elif coffset == 0 and cols == 1:
array = array[roffset:]
rows = min(rows, len(array))
def get_value(row, col):
value = array
if rows == 1 or cols == 1:
if rows == 1 and cols == 1:
value = array[0]
else:
value = array[(col if rows == 1 else row)]
if "ndarray" in str(type(value)):
value = value[0]
else:
value = array[row][col]
return value
xml += array_data_to_xml(rows, cols, lambda r: (get_value(r, c) for c in range(cols)), format)
return xml
class ExceedingArrayDimensionsException(Exception):
pass
def array_to_meta_xml(array, name, format):
type = array.dtype.kind
slice = name
l = len(array.shape)
# initial load, compute slice
if format == '%':
if l > 2:
slice += '[0]' * (l - 2)
for r in range(l - 2):
array = array[0]
if type == 'f':
format = '.5f'
elif type == 'i' or type == 'u':
format = 'd'
else:
format = 's'
else:
format = format.replace('%', '')
l = len(array.shape)
reslice = ""
if l > 2:
raise ExceedingArrayDimensionsException()
elif l == 1:
# special case with 1D arrays arr[i, :] - row, but arr[:, i] - column with equal shape and ndim
# http://stackoverflow.com/questions/16837946/numpy-a-2-rows-1-column-file-loadtxt-returns-1row-2-columns
# explanation: http://stackoverflow.com/questions/15165170/how-do-i-maintain-row-column-orientation-of-vectors-in-numpy?rq=1
# we use kind of a hack - get information about memory from C_CONTIGUOUS
is_row = array.flags['C_CONTIGUOUS']
if is_row:
rows = 1
cols = len(array)
if cols < len(array):
reslice = '[0:%s]' % (cols)
array = array[0:cols]
else:
cols = 1
rows = len(array)
if rows < len(array):
reslice = '[0:%s]' % (rows)
array = array[0:rows]
elif l == 2:
rows = array.shape[-2]
cols = array.shape[-1]
if cols < array.shape[-1] or rows < array.shape[-2]:
reslice = '[0:%s, 0:%s]' % (rows, cols)
array = array[0:rows, 0:cols]
# avoid slice duplication
if not slice.endswith(reslice):
slice += reslice
bounds = (0, 0)
if type in NUMPY_NUMERIC_TYPES:
bounds = (array.min(), array.max())
return array, slice_to_xml(slice, rows, cols, format, type, bounds), rows, cols, format
def array_default_format(type):
if type == 'f':
return '.5f'
elif type == 'i' or type == 'u':
return 'd'
else:
return 's'
def get_label(label):
return str(label) if not isinstance(label, tuple) else '/'.join(map(str, label))
def dataframe_to_xml(df, name, roffset, coffset, rows, cols, format):
"""
:type df: pandas.core.frame.DataFrame
:type name: str
:type coffset: int
:type roffset: int
:type rows: int
:type cols: int
:type format: str
"""
dim = len(df.axes)
num_rows = df.shape[0]
num_cols = df.shape[1] if dim > 1 else 1
format = format.replace('%', '')
if not format:
if num_rows > 0 and num_cols == 1: # series or data frame with one column
try:
kind = df.dtype.kind
except AttributeError:
try:
kind = df.dtypes[0].kind
except IndexError:
kind = 'O'
format = array_default_format(kind)
else:
format = array_default_format('f')
xml = slice_to_xml(name, num_rows, num_cols, format, "", (0, 0))
if (rows, cols) == (-1, -1):
rows, cols = num_rows, num_cols
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE, num_cols)
# need to precompute column bounds here before slicing!
col_bounds = [None] * cols
dtypes = [None] * cols
if dim > 1:
for col in range(cols):
dtype = df.dtypes.iloc[coffset + col].kind
dtypes[col] = dtype
if dtype in NUMPY_NUMERIC_TYPES:
cvalues = df.iloc[:, coffset + col]
bounds = (cvalues.min(), cvalues.max())
else:
bounds = (0, 0)
col_bounds[col] = bounds
else:
dtype = df.dtype.kind
dtypes[0] = dtype
col_bounds[0] = (df.min(), df.max()) if dtype in NUMPY_NUMERIC_TYPES else (0, 0)
df = df.iloc[roffset: roffset + rows, coffset: coffset + cols] if dim > 1 else df.iloc[roffset: roffset + rows]
rows = df.shape[0]
cols = df.shape[1] if dim > 1 else 1
def col_to_format(c):
return format if dtypes[c] in NUMPY_NUMERIC_TYPES and format else array_default_format(dtypes[c])
iat = df.iat if dim == 1 or len(df.columns.unique()) == len(df.columns) else df.iloc
xml += header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim)
xml += array_data_to_xml(rows, cols, lambda r: (("%" + col_to_format(c)) % (iat[r, c] if dim > 1 else iat[r])
for c in range(cols)), format)
return xml
def array_data_to_xml(rows, cols, get_row, format):
xml = "<arraydata rows=\"%s\" cols=\"%s\"/>\n" % (rows, cols)
for row in range(rows):
xml += "<row index=\"%s\"/>\n" % to_string(row)
for value in get_row(row):
xml += var_to_xml(value, '', format=format)
return xml
def slice_to_xml(slice, rows, cols, format, type, bounds):
return '<array slice=\"%s\" rows=\"%s\" cols=\"%s\" format=\"%s\" type=\"%s\" max=\"%s\" min=\"%s\"/>' % \
(slice, rows, cols, quote(format), type, bounds[1], bounds[0])
def header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim):
xml = "<headerdata rows=\"%s\" cols=\"%s\">\n" % (rows, cols)
for col in range(cols):
col_label = quote(get_label(df.axes[1].values[col]) if dim > 1 else str(col))
bounds = col_bounds[col]
col_format = "%" + col_to_format(col)
xml += '<colheader index=\"%s\" label=\"%s\" type=\"%s\" format=\"%s\" max=\"%s\" min=\"%s\" />\n' % \
(str(col), col_label, dtypes[col], col_to_format(col), col_format % bounds[1], col_format % bounds[0])
for row in range(rows):
xml += "<rowheader index=\"%s\" label = \"%s\"/>\n" % (str(row), get_label(df.axes[0].values[row]))
xml += "</headerdata>\n"
return xml
def is_able_to_format_number(format):
try:
format % math.pi
except Exception:
return False
return True
TYPE_TO_XML_CONVERTERS = {"ndarray": array_to_xml, "DataFrame": dataframe_to_xml, "Series": dataframe_to_xml}
def table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format):
_, type_name, _ = get_type(array)
format = format if is_able_to_format_number(format) else '%'
if type_name in TYPE_TO_XML_CONVERTERS:
return "<xml>%s</xml>" % TYPE_TO_XML_CONVERTERS[type_name](array, name, roffset, coffset, rows, cols, format)
else:
raise VariableError("type %s not supported" % type_name)
| apache-2.0 |
silky/airflow | setup.py | 1 | 2476 | from setuptools import setup, find_packages
import sys
# Kept manually in sync with airflow.__version__
version = '1.5.1'
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'pyhs2>=0.6.0',
]
mysql = ['mysql-python>=1.2.5']
postgres = ['psycopg2>=2.6']
optional = ['librabbitmq>=1.6.1']
samba = ['pysmbclient>=0.1.3']
druid = ['pydruid>=0.2.1']
s3 = ['boto>=2.36.0']
jdbc = ['jaydebeapi>=0.2.0']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.13.0']
hdfs = ['snakebite>=2.4.13']
slack = ['slackclient>=0.15']
crypto = ['cryptography>=0.9.3']
oracle = ['cx_Oracle>=5.1.2']
all_dbs = postgres + mysql + hive + mssql + hdfs
devel = all_dbs + doc + samba + s3 + ['nose'] + slack + crypto + oracle
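# These dependency groups feed ``extras_require`` below; a hypothetical install pulling
# in a couple of them would look like: pip install "airflow[mysql,s3]"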
setup(
name='airflow',
description='Programmatically author, schedule and monitor data pipelines',
version=version,
packages=find_packages(),
package_data={'': ['airflow/alembic.ini']},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.0, <0.9',
'celery>=3.1.17, <4.0',
'chartkick>=0.4.2, < 0.5',
'dill>=0.2.2, <0.3',
'flask>=0.10.1, <0.11',
'flask-admin==1.2.0',
'flask-cache>=0.13.1, <0.14',
'flask-login>=0.2.11, <0.3',
'flower>=0.7.3, <0.8',
'future>=0.15.0, <0.16',
'gunicorn>=19.3.0, <20.0',
'jinja2>=2.7.3, <3.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.15.2, <0.16',
'pygments>=2.0.1, <3.0',
'python-dateutil>=2.3, <3',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=0.9.8, <0.10',
'statsd>=3.0.1, <4.0',
'thrift>=0.9.2, <0.10',
],
extras_require={
'all': devel + optional,
'all_dbs': all_dbs,
'devel': devel,
'doc': doc,
'druid': druid,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'mssql': mssql,
'mysql': mysql,
'postgres': postgres,
's3': s3,
'samba': samba,
'slack': slack,
'crypto': crypto,
'oracle': oracle,
},
author='Maxime Beauchemin',
author_email='[email protected]',
url='https://github.com/airbnb/airflow',
download_url=(
'https://github.com/airbnb/airflow/tarball/' + version),
)
| apache-2.0 |
juanremi/datasets-to-CF-recsys | bigquery/bigquery2.py | 1 | 2001 | import pandas as pd
import numpy as np
from scipy.sparse import coo_matrix
from implicit.als import AlternatingLeastSquares
import requests
header = ['user_id', 'item_id', 'rating']
df2 = pd.read_csv('bigquery-frutas-sin-items-no-vistos.csv', usecols=[0, 1, 2], names=header)
df = df2[['user_id', 'item_id']] # Get rid of unnecessary info
# map each item and user to a unique numeric value
data_user = df.user_id.astype("category")
data_item = df.item_id.astype("category")
stars = coo_matrix((np.ones(df.shape[0]),
(data_item.cat.codes.copy(),
data_user.cat.codes.copy())))
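# The resulting sparse matrix has one row per item and one column per user, with a 1.0
# wherever that user interacted with the item (implicit feedback for the ALS model)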
model = AlternatingLeastSquares(factors=50,
regularization=0.01,
dtype=np.float64,
iterations=50)
confidence = 40
model.fit(confidence * stars)
repos = dict(enumerate(data_item.cat.categories))
repo_ids = {r: i for i, r in repos.iteritems()}
#print [(repos[r], s) for r, s in model.similar_items(repo_ids['manzana'])]
def user_stars(user):
repos = []
repos = df.item_id.loc[df.user_id == str(user)]
return repos
def user_items(u_stars):
star_ids = [repo_ids[s] for s in u_stars if s in repo_ids]
data = [confidence for _ in star_ids]
rows = [0 for _ in star_ids]
shape = (1, model.item_factors.shape[0])
return coo_matrix((data, (rows, star_ids)), shape=shape).tocsr()
juan = user_items(user_stars("juan"))
def recommend(user_items):
recs = model.recommend(userid=0, user_items=user_items, recalculate_user=True)
return [(repos[r], s) for r, s in recs]
def explain(user_items, repo):
_, recs, _ = model.explain(userid=0, user_items=user_items, itemid=repo_ids[repo])
return [(repos[r], s) for r, s in recs]
print '----------ITEMS STARRED BY THE USER: juan'
print user_stars("juan")
print '----------ITEMS TO RECOMMEND TO THE USER: juan'
print recommend(juan)
print '----------EXPLAIN for: manzana'
print explain(juan, 'manzana')
| gpl-3.0 |
rvmisra/coding_shorts_python | Methylation_no_motifs.py | 1 | 1794 | #!/usr/bin/python
#Methylation events identified for which a motif is NOT present
import sys
import numpy as np
import pandas as pd
from itertools import ifilter
#reads input filename from command line
filename = sys.argv[-1]
#this sets the number of rows to display/print - otherwise tructates to 64 rows
pd.set_option('display.max_rows',100000)
#column variable names
my_cols=['col1','col2','col3','col4','col5','col6','col7','col8','col9','col10','col11','col12','col13','col14','col15','col16','col17','col18','col19','col20','col21','col22','col23','col24','col25']
#creates a temp file. The input file is filtered based on the if statement, output is written to temp file
open('temp.txt','w').writelines(line for line in open(filename) if 'modified_base' not in line and 'motif' not in line)
#using pandas - the temp file is read, split by the delimeters into column variables at the top and the first row skipped
df = pd.read_csv('temp.txt',sep='\t|=|;',skiprows=(1),header=None,names=my_cols, engine='python')
#all columns are changed to numeric - any errors are ignored e.g. column only has strings
df.apply(pd.to_numeric, errors='ignore')
#goes to column 16 = IPD ratio values and filters by the set value
df=df.loc[df['col16'] > 1.7]
#output from IPD ratio filter is written to another temp file called np.txt
df.to_csv('np.txt', sep='\t')
#temp file is read, split by tab into column variables at the top, first row skipped
df= pd.read_csv('np.txt',sep='\t',skiprows=(1),header=None,names=my_cols, engine='python')
#sorts column 4 = modification start position
df=df.sort_values('col4')
#print df.col12
#creates bins
bins=np.arange(0,6000000,5000)
#numpy used to split col4 into the bins
ind=np.digitize(df['col4'],bins)
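#illustrative example: a modification start at position 12,345 gets bin index 3
#(the 10,000-15,000 window), since np.digitize returns the index of the right-hand bin edge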
#counts number of rows per bin
print df.groupby(ind).size()
| gpl-3.0 |
andycasey/original-oracle | oracle/plot.py | 1 | 24730 | # coding: utf-8
""" Visualise results """
__author__ = "Andy Casey <[email protected]>"
import logging
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator, LinearLocator
import numpy as np
from scipy import stats
import acor
logger = logging.getLogger("oracle")
def transition(data, model, synthetic_spectra, wavelength, data_color="#000000",
model_color=u"#4682b4",title=None):
"""
Plot a transition that has been fit by the ThereminSolver (and maybe by others
in the future).
:param data:
The observed spectrum.
:type data:
:class:`oracle.specutils.Spectrum1D`
"""
fig, ax = plt.subplots()
mask = model.mask(data.disp, 0.)
# Just plot surrounding region.
indices = data.disp.searchsorted([wavelength - 2, wavelength + 2])
disp, flux = data.disp.__getslice__(*indices), data.flux.__getslice__(*indices)
mask = model.mask(disp, 0.) # TODO
# TODO: CHANGE TO CONTINUUM
ax.plot([disp[0], disp[-1]], [1, 1], c="k", zorder=-1)
ax.plot(disp, flux, "-.", c=data_color)
ax.plot(disp, flux * mask, "-", c=data_color)
# Plot synthetic spectra
ax.plot(synthetic_spectra[:, 0], synthetic_spectra[:, 1], c=model_color)
ax.plot(synthetic_spectra[:, 0], synthetic_spectra[:, 2], "-.", c=model_color)
ax.axvline(wavelength, linestyle="-", c="#666666", zorder=-1)
if title is not None:
ax.set_title(title)
ax.set_ylim(0, 1.1)
ax.set_xlim(wavelength - 1, wavelength + 1)
ax.xaxis.set_major_locator(LinearLocator(5))
xticks = np.arange(wavelength - 1.0, wavelength + 1.5, 0.5)
ax.set_xticks(xticks)
ax.set_xticklabels(["{0:.2f}".format(each) for each in xticks])
ax.set_xlabel("Wavelength [$\AA$]")
ax.set_ylabel("Flux")
return fig
def _transition(data, model_spectra, transition, title=None):
import matplotlib.pyplot as plt
if free_broadening:
fig, ax = plt.subplots()
ax.scatter(wavelengths, [each[0][1] for each in line_results])
ax.set_xlabel("wavelength")
ax.set_ylabel("resolution")
fig.savefig("resolutions.png")
transition_indices = np.array(sum([self._transition_mapping[i] for i in xrange(len(self.data))], []))
fig, axes = plt.subplots(2)
ordered_transitions = self.atomic_lines[transition_indices]
wavelengths = np.array(wavelengths)
abundances = np.array([each[0][0] for each in line_results])
chi_sqs = np.array([each[1] for each in line_results])
equivalent_widths = np.array([each[2] for each in line_results])
ok = (equivalent_widths > 0)
scat = axes[0].scatter(ordered_transitions["excitation_potential"][ok], abundances[ok], c=chi_sqs[ok], cmap='YlOrRd')
scat2 = axes[1].scatter(np.log(equivalent_widths/wavelengths)[ok], abundances[ok], c=chi_sqs[ok], cmap='YlOrRd')
cbar = plt.colorbar(scat, cmap='YlOrRd')
cbar.set_label("$\chi^2$")
fig.savefig("excitation.png")
fig, axes = plt.subplots(len(self.data))
for i, (ax, observed, initial, optimal) \
in enumerate(zip(axes, self.data, initial_fluxes, optimal_fluxes)):
ax.plot(observed.disp, observed.flux, 'k')
ax.plot(observed.disp, initial, 'r', label='initial')
ax.plot(observed.disp, optimal, 'g', label='final')
#ax.plot(observed.disp, final, 'b', label='real final')
#ax.plot(blended[:, 0], blended[:,1], c="#666666", label='blended')
#[ax.plot(each[:,0], each[:,1], 'g') for each in opt_spectra[i]]
ax.set_xlim(observed.disp[0], observed.disp[-1])
ax.legend()
for i, transition in enumerate(self.atomic_lines):
if i not in sum(self._transition_mapping.values(), []): continue
# Which thing are we in?
for j, indices in self._transition_mapping.iteritems():
if i in indices: break
axes[j].axvline(transition["wavelength"], 0, 1.2, c="b")
"""
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot(disp, flux, "-.", c="k")
ax.plot(disp,flux * self.mask(disp, z, use_cached=False), 'k')
chi, r_chi, eqw, bar = chi_sq(xopt, True)
ax.set_title("$\chi^2 = {0:.2f}$".format(r_chi))
ax.plot(disp, bar[:,1], 'b')
ax.plot(disp,bar[:,2], ":", c="b")
ax.set_xlim(wavelength_region)
ax.axvline(wavelength, c="g")
fig.savefig("fit-{0:.2f}.png".format(transition["wavelength"]))
#if wavelength > 4732:
# raise a
plt.close("all")
"""
None
def acceptance_fractions(mean_acceptance_fractions, burn=None, **kwargs):
"""
Plot the mean acceptance fractions as a function of MCMC step.
:param mean_acceptance_fractions:
Mean acceptance fractions at each MCMC step.
:type mean_acceptance_fractions:
:class:`numpy.array`
:param burn:
The number of MCMC steps discarded as burn in.
:type burn:
int
:returns:
A figure showing the mean acceptance fractions as a function of MCMC step.
:rtype:
:class:`matplotlib.Figure`
"""
plot_kwargs = {"c": "k", "lw": 2}
plot_kwargs.update(kwargs)
fig, ax = plt.subplots()
N = len(mean_acceptance_fractions)
ax.plot(np.arange(1, 1 + N), mean_acceptance_fractions, **plot_kwargs)
if burn is not None and (N >= burn > 0):
ax.axvline(burn, ":")
ax.set_xlabel("Step")
ax.set_ylabel("<f_a>")
return fig
def spectrum_comparison(data, model, theta=None, model_spectra=None, figsize=None,
observed_color=u"k", model_color=u"#4682b4", mask_color=u"r", mask_alpha=0.1):
"""
Produce a comparison plot showing the observed and model spectra.
:param data:
A single observed spectrum, or list of observed spectra.
:type data:
:class:`specutils.Spectrum1D` object or a list of :class:`specutils.Spectrum1D`
objects
:param model:
The model class.
:type model:
:models.Model:
:param theta: [optional]
The :math:`\Theta` values to use to calculate model spectra for
comparison. Either ``theta`` or ``model_spectra`` are required.
:type theta:
dict
:param model_spectra: [optional]
The model spectra to show. Either ``theta`` or ``model_spectra``
are required.
:type model_spectra:
list of :class:`specutils.Spectrum1D` objects
:param figsize: [optional]
A 2 length tuple (width, height) of the figure size in inches.
:type figsize:
tuple
[TODO]: other docs
:returns:
A spectrum comparison figure.
:rtype:
:class:`matplotlib.Figure`
"""
if not isinstance(data, (tuple, list)):
data = [data]
K = len(data)
if figsize is None:
figsize = (25, 4 * len(data))
# Use speedy synth kwargs if the model allows:
try:
synth_kwargs = model._get_speedy_synth_kwargs(data)
except:
synth_kwargs = {}
if model_spectra is None:
model_spectra = model(data, synth_kwargs=synth_kwargs, **theta)
if not isinstance(model_spectra, (list, tuple)):
model_spectra = [model_spectra]
fig, axes = plt.subplots(K, figsize=figsize)
axes = [axes] if K == 1 else axes
mask = np.array(model.config.get("mask", []))
# Redshift all mask wavelengths where necessary
# [TODO] Need to allow for different redshifts in each channel.
for ax, observed_spectrum, model_spectrum \
in zip(axes, data, model_spectra):
# Plot the spectra
if model_spectrum is not None:
ax.plot(model_spectrum[:, 0], model_spectrum[:, 1], model_color)
ax.fill_between(observed_spectrum.disp,
observed_spectrum.flux - observed_spectrum.variance**0.5,
observed_spectrum.flux + observed_spectrum.variance**0.5,
facecolor="#eeeeee", edgecolor="#666666", zorder=-1)
ax.plot(observed_spectrum.disp, observed_spectrum.flux, observed_color)
# Show the mask
obs_start, obs_end = observed_spectrum.disp[0], observed_spectrum.disp[-1]
for start, end in mask:
if obs_end >= start and start >= obs_start \
or obs_end >= end and end >= obs_start:
# Show the mask in this axes.
ax.axvspan(start, end, facecolor=mask_color, alpha=mask_alpha,
edgecolor='none')
ax.set_xlim(obs_start, obs_end)
ax.set_ylabel("Flux, $F_\lambda$")
ax.set_xlabel("Wavelength, $\lambda$ ($\AA$)")
return fig
def projection(sampler, model, data, n=100, extents=None, fig=None, figsize=None,
mask_color="r", mask_alpha=0.1):
"""
Project the maximum likelihood values and sampled posterior points as spectra.
:param sampler:
The sampler employed.
:type sampler:
:class:`emcee.EnsembleSampler`
:param model:
The model employed.
:type model:
:class:`sick.models.Model`
:param data:
The observed spectra.
:type data:
iterable of :class:`sick.specutils.Spectrum1D` objects
:param extents: [optional]
The wavelength extents to plot for each channel in the form of [(min_chan_1,
max_chan_1), ..., (min_chan_N, max_chan_N)]
:type extents:
tuple or None
:param fig: [optional]
Overplot onto the provided figure object.
:type fig:
:class:`matplotlib.Figure` or None
:param figsize: [optional]
The figure size (x-dimension, y-dimension) in inches.
:type figsize:
tuple or None
:raises ValueError:
If a ``fig`` is provided with the incorrect number of axes.
:raise TypeError:
If the ``data`` are not provided in the correct type.
:returns:
The projection figure.
:rtype:
:class:`matplotlib.Figure`
"""
if not isinstance(data, (tuple, list)) or \
any([not isinstance(each, specutils.Spectrum1D) for each in data]):
raise TypeError("Data must be a list-type of Spectrum1D objects.")
K = len(data)
factor = 3.0
lbdim = 0.5 * factor
trdim = 0.2 * factor
whspace = 0.10
width = max([len(each.disp) for each in data])/150.
height = factor*K + factor * (K - 1.) * whspace
dimy = lbdim + height + trdim
dimx = lbdim + width + trdim
if figsize is None:
figsize = (dimx, dimy)
if fig is None:
fig, axes = plt.subplots(K, 1, figsize=figsize)
else:
try:
axes = np.array(fig.axes).reshape((1, K))
except:
raise ValueError("Provided figure has {0} axes, but data has "
"parameters K={1}".format(len(fig.axes), K))
# Find the most probable sampled theta and compute spectra for it
max_lnprob_index = np.argmax(sampler.lnprobability.flatten())
max_lnprob_theta = sampler.flatchain[max_lnprob_index]
max_lnprob_fluxes = [model(dispersion=[spectrum.disp for spectrum in data],
**dict(zip(model.parameters, max_lnprob_theta)))[:,1]]
if n > 0:
# Draw samples from sampler.chain and compute spectra for them
sampled_fluxes = []
n_samples = len(sampler.flatchain)
for i in range(n):
sampled_theta = dict(zip(
model.parameters,
sampler.flatchain[np.random.randint(0, n_samples)]
))
try:
sampler_flux = [model(dispersion=[s.disp for s in data],
**sampled_theta)[:,1]]
except:
continue
else:
sampled_fluxes.append(sampler_flux)
if len(data) == 1:
axes = [axes]
for k, (ax, max_lnprob_flux, observed_spectrum) in enumerate(zip(axes, max_lnprob_fluxes, data)):
# Draw the random samples from the chain
if n > 0:
for sampled_flux in sampled_fluxes:
ax.plot(observed_spectrum.disp, sampled_flux[k], color=u"#4682b4", alpha=0.1)
# Draw the ML spectra
ax.plot(observed_spectrum.disp, max_lnprob_flux, color=u"#4682b4", lw=2)
# Plot the data
ax.plot(observed_spectrum.disp, observed_spectrum.flux, color="k")
ax.fill_between(observed_spectrum.disp,
observed_spectrum.flux - observed_spectrum.variance**0.5,
observed_spectrum.flux + observed_spectrum.variance**0.5,
facecolor='#eeeeee', edgecolor="#666666", zorder=-1)
# Show the mask
mask = np.array(model.config.get("mask", []))
obs_start, obs_end = observed_spectrum.disp[0], observed_spectrum.disp[-1]
for start, end in mask:
if obs_end >= start and start >= obs_start \
or obs_end >= end and end >= obs_start:
# Show the mask in this axes.
ax.axvspan(start, end, facecolor=mask_color, alpha=mask_alpha,
edgecolor='none')
# By default only show common overlap between the model and spectral data
if extents is None:
finite_data = np.isfinite(observed_spectrum.flux)
finite_model = np.isfinite(max_lnprob_flux)
x_extent = [
np.max([observed_spectrum.disp[indices][0] for indices in (finite_model, finite_data)]),
np.min([observed_spectrum.disp[indices][-1] for indices in (finite_model, finite_data)]),
]
indices = observed_spectrum.disp.searchsorted(x_extent)
finite_observed_flux = observed_spectrum.flux[indices[0]:indices[1]]
y_extent = [
0.9 * np.min(finite_observed_flux[np.isfinite(finite_observed_flux)]),
1.1 * np.max(finite_observed_flux[np.isfinite(finite_observed_flux)])
]
ax.set_xlim(x_extent)
ax.set_ylim(y_extent)
else:
ax.set_xlim(extents[k][0])
ax.set_ylim(extents[k][1])
# Labels and ticks
if not (k < K - 1):
ax.set_xlabel("Wavelength, $\lambda$ ($\AA$)")
ax.set_ylabel("Flux, $F_\lambda$")
ax.yaxis.set_label_coords(-0.05, 0.5)
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.yaxis.set_major_locator(MaxNLocator(5))
[l.set_rotation(45) for l in ax.get_yticklabels()]
return fig
def autocorrelation(xs, burn_in, labels=None, fig=None):
"""
Create a plot showing the autocorrelation in each parameter.
:param xs:
The sampled values. This should be a three dimensional array of size
``(n_walkers, n_steps, n_parameters)``
:type xs:
:class:`numpy.array`
:param burn_in: [optional]
The number of steps used for burn-in.
:type burn_in:
int
:param labels: [optional]
The labels for each parameter.
:type labels:
tuple of str
:param fig: [optional]
Figure class to use for the plotting.
:type fig:
:class:`matplotlib.Figure`
:returns:
A figure showing the autocorrelation in each parameter at every MCMC step.
:rtype:
:class:`matplotlib.Figure`
"""
n_walkers, n_steps, K = xs.shape
factor = 2.0
lbdim = 0.5 * factor
trdim = 0.2 * factor
whspace = 0.10
width = 15.
height = factor*K + factor * (K - 1.) * whspace
dimy = lbdim + height + trdim
dimx = lbdim + width + trdim
if fig is None:
fig, axes = plt.subplots(K, 1, figsize=(dimx, dimy))
else:
try:
axes = np.array(fig.axes).reshape((1, K))
except:
raise ValueError("Provided figure has {0} axes, but data has "
"parameters K={1}".format(len(fig.axes), K))
lm = lbdim / dimx
bm = lbdim / dimy
trm = (lbdim + height) / dimy
fig.subplots_adjust(left=lm, bottom=bm, right=trm, top=trm,
wspace=whspace, hspace=whspace)
for k, ax in enumerate(axes):
ax.plot(acor.function(np.mean(xs[:, burn_in:, k], axis=0)), color="k")
if burn_in is not None:
ax.axvline(burn_in, color="k", linestyle=":")
ax.set_xlim(0, n_steps)
if k < K - 1:
ax.set_xticklabels([])
else:
ax.set_xlabel("Step")
ax.yaxis.set_major_locator(MaxNLocator(4))
[l.set_rotation(45) for l in ax.get_yticklabels()]
if labels is not None:
ax.set_ylabel(labels[k])
ax.yaxis.set_label_coords(-0.05, 0.5)
return fig
def chains(xs, labels=None, truths=None, truth_color=u"#4682b4", burn_in=None,
alpha=0.5, fig=None):
"""
Create a plot showing the walker values for each parameter at every step.
Args:
xs (array_like) : The samples. This should be a 3D array of size
(n_walkers, n_steps, n_parameters)
labels (iterable, optional) : A list of names for the parameters.
truths (iterable, optional) : A list of reference values to indicate on
the plots.
truth_color (str, optional) : A `matplotlib` style color for the `truths`
markers.
burn_in (int, optional) : A reference step to indicate on the plots.
alpha (float between [0, 1], optional) : Transparency of individual walker
lines.
fig (`matplotlib.Figure`, optional) : Overplot onto the provided figure object.
Returns:
A `matplotlib.Figure` object.
"""
n_walkers, n_steps, K = xs.shape
if labels is not None:
assert len(labels) == K
if truths is not None:
assert len(truths) == K
factor = 2.0
lbdim = 0.5 * factor
trdim = 0.2 * factor
whspace = 0.10
width = 15.
height = factor*K + factor * (K - 1.) * whspace
dimy = lbdim + height + trdim
dimx = lbdim + width + trdim
if fig is None:
fig, axes = plt.subplots(K, 1, figsize=(dimx, dimy))
else:
try:
axes = np.array(fig.axes).reshape((1, K))
except:
raise ValueError("Provided figure has {0} axes, but data has "
"parameters K={1}".format(len(fig.axes), K))
lm = lbdim / dimx
bm = lbdim / dimy
trm = (lbdim + height) / dimy
fig.subplots_adjust(left=lm, bottom=bm, right=trm, top=trm,
wspace=whspace, hspace=whspace)
for k, ax in enumerate(axes):
for walker in range(n_walkers):
ax.plot(xs[walker, :, k], color="k", alpha=alpha)
if burn_in is not None:
ax.axvline(burn_in, color="k", linestyle=":")
if truths is not None:
ax.axhline(truths[k], color=truth_color, lw=2)
ax.set_xlim(0, n_steps)
if k < K - 1:
ax.set_xticklabels([])
else:
ax.set_xlabel("Step")
ax.yaxis.set_major_locator(MaxNLocator(4))
[l.set_rotation(45) for l in ax.get_yticklabels()]
if labels is not None:
ax.set_ylabel(labels[k])
ax.yaxis.set_label_coords(-0.05, 0.5)
return fig
def balance(atomic_data_table, title=None):
"""
Plot the derived abundances as a function of excitation potential and line
strength, as typically done in classical analysis approaches.
:param atomic_data_table:
A record array table containing the wavelength, species, lower excitation
potential, oscillator strength (loggf), equivalent width, and abundance
of the atomic transitions used for stellar parameter determination.
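The field names used below are: wavelength, atomic_number, ionised,
excitation_potential, equivalent_width and abundance, plus the optional
u_pos_abundance / u_neg_abundance and u_pos_equivalent_width /
u_neg_equivalent_width uncertainty columns.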
"""
fig, axes = plt.subplots(2)
excitation_ax, line_strength_ax = axes
if len(set(atomic_data_table["atomic_number"].astype(int))) > 1:
logger.warn("Multiple elements found in atomic data table. These will "\
"all be plotted as the same symbol for the moment.")
# Separate neutral and ionised species.
neutral = (atomic_data_table["ionised"] == 1)
ionised = ~neutral
# Plot the excitation potential axes
try:
uncertainties = np.any(np.isfinite(np.vstack([
atomic_data_table["u_pos_abundance"], atomic_data_table["u_neg_abundance"]])))
except ValueError:
uncertainties = False
if uncertainties:
# Plot uncertainties
excitation_ax.errorbar(atomic_data_table["excitation_potential"],
atomic_data_table["abundance"],
yerr=(np.abs(atomic_data_table["u_neg_abundance"]), atomic_data_table["u_pos_abundance"]),
fmt=None, ecolor="k")
excitation_ax.scatter(atomic_data_table["excitation_potential"][neutral],
atomic_data_table["abundance"][neutral], facecolor="k", zorder=10)
excitation_ax.scatter(atomic_data_table["excitation_potential"][ionised],
atomic_data_table["abundance"][ionised], facecolor="b", zorder=10)
# Measure slopes by linear regression [TODO] and show them
if uncertainties:
y_uncertainty = np.nanmax(np.abs(np.vstack([
atomic_data_table["u_pos_abundance"], atomic_data_table["u_neg_abundance"]])), axis=0)
assert len(y_uncertainty) == len(atomic_data_table)
m, b = 0, 1
"""
m, b = line.fit(
x=atomic_data_table["excitation_potential"][neutral],
y=atomic_data_table["abundance"][neutral],
y_uncertainty=y_uncertainty[neutral], full_output=True)[:2]
"""
else:
m, b = 0, 1
"""
m, b = line.fit(
x=atomic_data_table["excitation_potential"][neutral],
y=atomic_data_table["abundance"][neutral], full_output=True)[:2]
"""
x_limits = np.array(excitation_ax.get_xlim())
y_limits = excitation_ax.get_ylim()
excitation_ax.plot(x_limits, [np.mean(atomic_data_table["abundance"])] * 2, c="#666666")
excitation_ax.plot(x_limits, m * x_limits + b, ":", c="k", zorder=-1)
excitation_ax.set_xlim(x_limits)
excitation_ax.set_ylim(y_limits)
excitation_ax.set_xlabel("Lower Excitation Potential (eV)")
excitation_ax.set_ylabel("$\\log_{\\epsilon}({\\rm Fe})$")
# [TODO]
# Quote the slope on the axes.
#logger.info("Slope on excitation balance plot: {0:.4f}".format(m))
# Plot the line strength axes
reduced_equivalent_width = np.log(atomic_data_table["equivalent_width"]/atomic_data_table["wavelength"])
if uncertainties:
x_pos_uncertainties = np.log(
(atomic_data_table["equivalent_width"] + atomic_data_table["u_pos_equivalent_width"]) \
/atomic_data_table["wavelength"]) - reduced_equivalent_width
x_neg_uncertainties = np.abs(np.log(
(atomic_data_table["equivalent_width"] + atomic_data_table["u_neg_equivalent_width"]) \
/atomic_data_table["wavelength"]) - reduced_equivalent_width)
line_strength_ax.errorbar(reduced_equivalent_width,
atomic_data_table["abundance"],
xerr=(x_neg_uncertainties, x_pos_uncertainties),
yerr=(np.abs(atomic_data_table["u_neg_abundance"]), atomic_data_table["u_pos_abundance"]),
fmt=None, ecolor="k")
m, b = 0, 1
"""
m, b = line.fit(
x=reduced_equivalent_width[neutral],
y=atomic_data_table["abundance"][neutral],
y_uncertainty=y_uncertainty[neutral], full_output=True)[:2]
"""
else:
m, b = 0, 1
"""
m, b = line.fit(
x=reduced_equivalent_width[neutral],
y=atomic_data_table["abundance"][neutral], full_output=True)[:2]
"""
line_strength_ax.scatter(
reduced_equivalent_width[neutral], atomic_data_table["abundance"][neutral],
facecolor="k", zorder=10)
line_strength_ax.scatter(
reduced_equivalent_width[ionised], atomic_data_table["abundance"][ionised],
facecolor="b", zorder=10)
# Measure slopes by linear regression [TODO] and show them
#logger.info("Slope on the reduced equivalent width plot: {0:.4f}".format(m))
x_limits = np.array(line_strength_ax.get_xlim())
line_strength_ax.plot(x_limits, [np.mean(atomic_data_table["abundance"])] * 2,
c="#666666", zorder=-1)
line_strength_ax.plot(x_limits, m * x_limits + b, ":", c="k", zorder=-1)
line_strength_ax.set_xlim(x_limits)
line_strength_ax.set_ylim(y_limits)
line_strength_ax.set_xlabel("Reduced Equivalent Width")
line_strength_ax.set_ylabel("$\\log_{\\epsilon}({\\rm Fe})$")
ionisation_difference = np.nanmean(atomic_data_table["abundance"][neutral]) \
- np.nanmean(atomic_data_table["abundance"][ionised])
logger.info("Mean neutral and ionised abundance difference: {0:.3f} dex".format(
ionisation_difference))
if title is not None:
excitation_ax.set_title(title)
return fig | mit |
ironmussa/Optimus | examples/10_min_from_pandas_to_spark_with_optimus.py | 1 | 9869 | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.1
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hi, Are you in Google Colab?
# In Google Colab you can easily run Optimus. If not, you may want to go here:
# https://colab.research.google.com/github/ironmussa/Optimus/blob/master/examples/10_min_from_spark_to_pandas_with_optimus.ipynb
# Install Optimus and all of its dependencies.
import sys
if 'google.colab' in sys.modules:
!apt-get install openjdk-8-jdk-headless -qq > /dev/null
!wget -q https://archive.apache.org/dist/spark/spark-2.4.1/spark-2.4.1-bin-hadoop2.7.tgz
!tar xf spark-2.4.1-bin-hadoop2.7.tgz
!pip install optimuspyspark
# ## Restart Runtime
# Before you continue, please go to the 'Runtime' Menu above, and select 'Restart Runtime (Ctrl + M + .)'.
if 'google.colab' in sys.modules:
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["SPARK_HOME"] = "/content/spark-2.4.1-bin-hadoop2.7"
# ## You are done. Enjoy Optimus!
# # Hacking Optimus!
# To hack on Optimus we recommend cloning the repo and changing ```repo_path``` relative to this notebook.
# +
repo_path=".."
# This will reload the change you make to Optimus in real time
# %load_ext autoreload
# %autoreload 2
import sys
sys.path.append(repo_path)
# -
# ## Install Optimus
#
# from command line:
#
# `pip install optimuspyspark`
#
# from a notebook you can use:
#
# `!pip install optimuspyspark`
# ## Import Optimus and start it
from optimus import Optimus
op = Optimus(master="local")
# ## Dataframe creation
#
# Create a dataframe by passing a list of values for columns and rows. Unlike pandas, you need to specify the column names.
#
df = op.create.df(
[
"names",
"height(ft)",
"function",
"rank",
"weight(t)",
"japanese name",
"last position",
"attributes"
],
[
("Optim'us", 28.0, "Leader", 10, 4.3, ["Inochi", "Convoy"], "19.442735,-99.201111", [8.5344, 4300.0]),
("bumbl#ebéé ", 17.5, "Espionage", 7, 2.0, ["Bumble", "Goldback"], "10.642707,-71.612534", [5.334, 2000.0]),
("ironhide&", 26.0, "Security", 7, 4.0, ["Roadbuster"], "37.789563,-122.400356", [7.9248, 4000.0]),
("Jazz", 13.0, "First Lieutenant", 8, 1.8, ["Meister"], "33.670666,-117.841553", [3.9624, 1800.0]),
("Megatron", None, "None", None, 5.7, ["Megatron"], None, [None, 5700.0]),
("Metroplex_)^$", 300.0, "Battle Station", 8, None, ["Metroflex"], None, [91.44, None]),
]).h_repartition(1)
df.table()
# Creating a dataframe by passing a list of tuples specifying the column data type. You can specify the data type as a string or as a Spark DataType. https://spark.apache.org/docs/2.3.1/api/java/org/apache/spark/sql/types/package-summary.html
#
# You can also use some Optimus predefined types:
# * "str" = StringType()
# * "int" = IntegerType()
# * "float" = FloatType()
# * "bool" = BooleanType()
df = op.create.df(
[
("names", "str"),
("height", "float"),
("function", "str"),
("rank", "int"),
],
[
("bumbl#ebéé ", 17.5, "Espionage", 7),
("Optim'us", 28.0, "Leader", 10),
("ironhide&", 26.0, "Security", 7),
("Jazz", 13.0, "First Lieutenant", 8),
("Megatron", None, "None", None),
])
df.table()
# Creating a dataframe and specifying whether each column accepts null values
df = op.create.df(
[
("names", "str", True),
("height", "float", True),
("function", "str", True),
("rank", "int", True),
],
[
("bumbl#ebéé ", 17.5, "Espionage", 7),
("Optim'us", 28.0, "Leader", 10),
("ironhide&", 26.0, "Security", 7),
("Jazz", 13.0, "First Lieutenant", 8),
("Megatron", None, "None", None),
])
df.table()
# Creating a dataframe from a pandas dataframe
# +
import pandas as pd
data = [("bumbl#ebéé ", 17.5, "Espionage", 7),
("Optim'us", 28.0, "Leader", 10),
("ironhide&", 26.0, "Security", 7)]
labels = ["names", "height", "function", "rank"]
# Create pandas dataframe
pdf = pd.DataFrame.from_records(data, columns=labels)
df = op.create.df(pdf=pdf)
df.table()
# -
# ## Viewing data
# Here is how to view the first 10 elements of a dataframe.
df.table(10)
# ## About Spark
# Spark and Optimus work differently than pandas or R. If you are not familiar with Spark, we recommend taking the time to take a look at the links below.
#
# ### Partitions
# Partitions are the way Spark divides the data across your local computer or cluster to optimize how it will be processed. Partitioning can greatly impact Spark performance.
#
# Take 5 minutes to read this article:
# https://www.dezyre.com/article/how-data-partitioning-in-spark-helps-achieve-more-parallelism/297
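# A minimal sketch (not part of the original notebook), using the `df` created
# above: you can inspect the number of partitions through the underlying Spark
# RDD API and change it with `repartition`.
print(df.rdd.getNumPartitions())  # current number of partitions
df_repart = df.repartition(4)     # returns a new dataframe split into 4 partitions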
#
# ### Lazy operations
# Lazy evaluation in Spark means that the execution will not start until an action is triggered.
#
# https://stackoverflow.com/questions/38027877/spark-transformation-why-its-lazy-and-what-is-the-advantage
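# A minimal sketch (not part of the original notebook), using the `df` created
# above: transformations such as `select` and `filter` only build an execution
# plan; nothing is computed until an action such as `count()` is triggered.
lazy_plan = df.select("names").filter(df["rank"] > 7)  # no Spark job runs here
lazy_plan.count()  # action: the plan is executed at this point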
#
# ### Immutability
# Immutability rules out a big set of potential problems due to updates from multiple threads at once. Immutable data is definitely safe to share across processes.
#
# https://www.quora.com/Why-is-RDD-immutable-in-Spark
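# A minimal sketch (not part of the original notebook): dataframes are immutable,
# so column operations return a new dataframe and leave the original untouched
# (`cols.upper` is demonstrated later in this notebook).
df_upper = df.cols.upper("names")
df.table()        # the original dataframe still has the lower-case names
df_upper.table()  # the new dataframe holds the transformed column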
#
# ### Spark Architecture
# https://jaceklaskowski.gitbooks.io/mastering-apache-spark/spark-architecture.html
# ## Columns and Rows
#
# Optimus organizes operations into columns and rows. This is a little different from how pandas works, where all operations revolve around the DataFrame class. We think this approach makes it easier to access and transform data. For a deep dive into this design decision please read:
#
# https://towardsdatascience.com/announcing-optimus-v2-agile-data-science-workflows-made-easy-c127a12d9e13
# Sort columns by name
df.cols.sort().table()
# Sort rows by the rank value
df.rows.sort("rank").table()
df.describe().table()
# ## Selection
#
# Unlike Pandas, Spark DataFrames don't support random row access. So methods like `loc` in pandas are not available.
#
# Also, Spark DataFrames don't have a row index like pandas does, so methods like `iloc` are not available.
# Select and show a specific column
df.cols.select("names").table()
# Select rows from a dataframe where the condition is met
df.rows.select(df["rank"] > 7).table()
# Select rows that contain specific values
df.rows.is_in("rank", [7, 10]).table()
# Create a unique id for every row.
df.rows.create_id().table()
# Create new columns
df.cols.append("Affiliation", "Autobot").table()
# ## Missing Data
# + {"inputHidden": false, "outputHidden": false}
df.rows.drop_na("*", how='any').table()
# -
# Filling missing data.
df.cols.fill_na("*", "N//A").table()
# To get the boolean mask where values are nan.
df.cols.is_na("*").table()
# # Operations
# ## Stats
# + {"inputHidden": false, "outputHidden": false}
df.cols.mean("height")
# + {"inputHidden": false, "outputHidden": false}
df.cols.mean("*")
# -
# ### Apply
# + {"inputHidden": false, "outputHidden": false}
def func(value, args):
return value + 1
df.cols.apply("height", func, "float").table()
# -
# ### Histogramming
df.cols.count_uniques("*")
# ### String Methods
# + {"inputHidden": false, "outputHidden": false}
df \
.cols.lower("names") \
.cols.upper("function").table()
# -
# ## Merge
# ### Concat
#
# Optimus provides an intuitive way to concatenate dataframes by columns or rows.
# +
df_new = op.create.df(
[
"class"
],
[
("Autobot"),
("Autobot"),
("Autobot"),
("Autobot"),
("Decepticons"),
]).h_repartition(1)
op.append([df, df_new], "columns").table()
# +
df_new = op.create.df(
[
"names",
"height",
"function",
"rank",
],
[
("Grimlock", 22.9, "Dinobot Commander", 9),
]).h_repartition(1)
op.append([df, df_new], "rows").table()
# + {"inputHidden": false, "outputHidden": false}
# Operations like `join` and `group` are handled using Spark directly
# + {"inputHidden": false, "outputHidden": false}
df_melt = df.melt(id_vars=["names"], value_vars=["height", "function", "rank"])
df.table()
# -
df_melt.pivot("names", "variable", "value").table()
# ## Plotting
df.plot.hist("height", 10)
df.plot.frequency("*", 10)
# ## Getting Data In/Out
# + {"inputHidden": false, "outputHidden": false}
df.cols.names()
# + {"inputHidden": false, "outputHidden": false}
df.to_json()
# + {"inputHidden": false, "outputHidden": false}
df.schema
# -
df.table()
# + {"inputHidden": false, "outputHidden": false}
op.profiler.run(df, "height", infer=True)
# -
df_csv = op.load.csv("https://raw.githubusercontent.com/ironmussa/Optimus/master/examples/data/foo.csv").limit(5)
df_csv.table()
df_json = op.load.json("https://raw.githubusercontent.com/ironmussa/Optimus/master/examples/data/foo.json").limit(5)
df_json.table()
df_csv.save.csv("test.csv")
df.table()
# ## Enrichment
df = op.load.json("https://raw.githubusercontent.com/ironmussa/Optimus/master/examples/data/foo.json")
df.table()
# +
import requests
def func_request(params):
# You can use here whatever header or auth info you need to send.
# For more information see the requests library
url= "https://jsonplaceholder.typicode.com/todos/" + str(params["id"])
return requests.get(url)
def func_response(response):
# Here you can parse the response
return response["title"]
e = op.enrich(host="localhost", port=27017, db_name="jazz")
e.flush()
df_result = e.run(df, func_request, func_response, calls= 60, period = 60, max_tries = 8)
# -
df_result.table()
| apache-2.0 |
ky822/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for the LFW requires medium-size data downloading and processing
If the data has not already been downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and without a limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
par2/lamana-test | setup.py | 1 | 1879 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='lamana',
version='0.4.8', ### edit
description='An extensible Python package for Laminate Analysis.',
author='P. Robinson II',
author_email='[email protected]',
url='https://github.com/par2/lamana', # use the URL to the github repo
download_url='https://github.com/par2/lamana/tarball/0.4.8',
packages=['lamana', 'lamana.models', 'lamana.utils', 'lamana.tests',
'lamana.tests.controls_LT', 'lamana.models.tests'],
keywords=['laminate analysis', 'visualization'],
install_requires=['matplotlib', 'pandas', 'numpy'],
classifiers=[
'Framework :: IPython',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
],
)
# References
# ----- --------- -------------
# (001) PyPI Tutorial http://peterdowns.com/posts/first-time-with-pypi.html
# (002) Git tagging https://git-scm.com/book/en/v2/Git-Basics-Tagging
# (003) Remove remote tag https://nathanhoad.net/how-to-delete-a-remote-git-tag
# (004) Create package http://zetcode.com/articles/packageinpython/
# (005) Detailed package guide https://python-packaging-user-guide.readthedocs.org/en/latest/
# (006) Updated guide: twine, pip, wheel http://joebergantine.com/blog/2015/jul/17/releasing-package-pypi/
# (007) packages Python Cookbook, 3rd ed, p. 435
# (008) install_requires https://python-packaging-user-guide.readthedocs.org/en/latest/requirements/
| bsd-3-clause |
pratapvardhan/scikit-learn | examples/mixture/plot_gmm_covariances.py | 13 | 4262 | """
===============
GMM covariances
===============
Demonstration of several covariances types for Gaussian mixture models.
See :ref:`gmm` for more information on the estimator.
Although GMMs are often used for clustering, we can compare the obtained
clusters with the actual classes from the dataset. We initialize the means
of the Gaussians with the means of the classes from the training set to make
this comparison valid.
We plot predicted labels on both training and held out test data using a
variety of GMM covariance types on the iris dataset.
We compare GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.model_selection import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
estimators = dict((covar_type,
GMM(n_components=n_classes, covariance_type=covar_type,
init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_estimators = len(estimators)
plt.figure(figsize=(3 * n_estimators / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, estimator) in enumerate(estimators.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
estimator.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
estimator.fit(X_train)
h = plt.subplot(2, n_estimators / 2, index + 1)
make_ellipses(estimator, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = estimator.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = estimator.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
olologin/scikit-learn | examples/exercises/plot_cv_diabetes.py | 53 | 2861 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import LassoCV
from sklearn.linear_model import Lasso
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = Lasso(random_state=0)
alphas = np.logspace(-4, -0.5, 30)
scores = list()
scores_std = list()
n_folds = 3
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_val_score(lasso, X, y, cv=n_folds, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
scores, scores_std = np.array(scores), np.array(scores_std)
plt.figure().set_size_inches(8, 6)
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
std_error = scores_std / np.sqrt(n_folds)
plt.semilogx(alphas, scores + std_error, 'b--')
plt.semilogx(alphas, scores - std_error, 'b--')
# alpha=0.2 controls the translucency of the fill color
plt.fill_between(alphas, scores + std_error, scores - std_error, alpha=0.2)
plt.ylabel('CV score +/- std error')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
plt.xlim([alphas[0], alphas[-1]])
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = LassoCV(alphas=alphas, random_state=0)
k_fold = KFold(3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold.split(X, y)):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
ARudiuk/mne-python | tutorials/plot_visualize_evoked.py | 3 | 7963 | """
.. _tut_viz_evoked:
=====================
Visualize Evoked data
=====================
"""
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
###############################################################################
# In this tutorial we focus on plotting functions of :class:`mne.Evoked`.
# Here we read the evoked object from a file. Check out
# :ref:`tut_epoching_and_averaging` to get to this stage from raw data.
data_path = mne.datasets.sample.data_path()
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
evoked = mne.read_evokeds(fname, baseline=(None, 0), proj=True)
print(evoked)
###############################################################################
# Notice that ``evoked`` is a list of evoked instances. You can read only one
# of the categories by passing the argument ``condition`` to
# :func:`mne.read_evokeds`. To make things more simple for this tutorial, we
# read each instance to a variable.
evoked_l_aud = evoked[0]
evoked_r_aud = evoked[1]
evoked_l_vis = evoked[2]
evoked_r_vis = evoked[3]
###############################################################################
# Let's start with a simple one. We plot event related potentials / fields
# (ERP/ERF). The bad channels are not plotted by default. Here we explicitly
# set the exclude parameter to show the bad channels in red. All plotting
# functions of MNE-python return a handle to the figure instance. When we have
# the handle, we can customise the plots to our liking.
fig = evoked_l_aud.plot(exclude=())
###############################################################################
# As noted above, the figure handle returned by the plotting function lets us
# customise the plot. For example, we can get rid of the empty space with a
# simple function call.
fig.tight_layout()
###############################################################################
# Now let's make it a bit fancier and only use MEG channels. Many of the
# MNE-functions include a ``picks`` parameter to include a selection of
# channels. ``picks`` is simply a list of channel indices that you can easily
# construct with :func:`mne.pick_types`. See also :func:`mne.pick_channels` and
# :func:`mne.pick_channels_regexp`.
# Using ``spatial_colors=True``, the individual channel lines are color coded
# to show the sensor positions - specifically, the x, y, and z locations of
# the sensors are transformed into R, G and B values.
picks = mne.pick_types(evoked_l_aud.info, meg=True, eeg=False, eog=False)
evoked_l_aud.plot(spatial_colors=True, gfp=True, picks=picks)
###############################################################################
# Notice the legend on the left. The colors would suggest that there may be two
# separate sources for the signals. This wasn't obvious from the first figure.
# Try painting the slopes with left mouse button. It should open a new window
# with topomaps (scalp plots) of the average over the painted area. There is
# also a function for drawing topomaps separately.
evoked_l_aud.plot_topomap()
###############################################################################
# By default the topomaps are drawn from evenly spread out points of time over
# the evoked data. We can also define the times ourselves.
times = np.arange(0.05, 0.151, 0.05)
evoked_r_aud.plot_topomap(times=times, ch_type='mag')
###############################################################################
# Or we can automatically select the peaks.
evoked_r_aud.plot_topomap(times='peaks', ch_type='mag')
###############################################################################
# You can take a look at the documentation of :func:`mne.Evoked.plot_topomap`
# or simply write ``evoked_r_aud.plot_topomap?`` in your python console to
# see the different parameters you can pass to this function. Most of the
# plotting functions also accept ``axes`` parameter. With that, you can
# customise your plots even further. First we shall create a set of matplotlib
# axes in a single figure and plot all of our evoked categories next to each
# other.
fig, ax = plt.subplots(1, 5)
evoked_l_aud.plot_topomap(times=0.1, axes=ax[0], show=False)
evoked_r_aud.plot_topomap(times=0.1, axes=ax[1], show=False)
evoked_l_vis.plot_topomap(times=0.1, axes=ax[2], show=False)
evoked_r_vis.plot_topomap(times=0.1, axes=ax[3], show=True)
###############################################################################
# Notice that we created five axes, but had only four categories. The fifth
# axes was used for drawing the colorbar. You must provide room for it when you
# create this kind of custom plots or turn the colorbar off with
# ``colorbar=False``. That's what the warnings are trying to tell you. Also, we
# used ``show=False`` for the three first function calls. This prevents the
# showing of the figure prematurely. The behavior depends on the mode you are
# using for your python session. See http://matplotlib.org/users/shell.html for
# more information.
#
# We can combine the two kinds of plots in one figure using the ``plot_joint``
# method of Evoked objects. Called as-is (``evoked.plot_joint()``), this
# function should give a stylish and informative display of spatio-temporal
# dynamics. Also note the ``topomap_args`` and ``ts_args`` parameters of
# :func:`mne.Evoked.plot_joint`. You can pass key-value pairs as a python
# dictionary that gets passed as parameters to the topomaps
# (:func:`mne.Evoked.plot_topomap`) and time series (:func:`mne.Evoked.plot`)
# of the joint plot.
# For specific styling, use these ``topomap_args`` and ``ts_args``
# arguments. Here, topomaps at specific time points (70 and 105 msec) are
# shown, sensors are not plotted, and the Global Field Power is shown:
ts_args = dict(gfp=True)
topomap_args = dict(sensors=False)
evoked_r_aud.plot_joint(title='right auditory', times=[.07, .105],
ts_args=ts_args, topomap_args=topomap_args)
###############################################################################
# We can also plot the activations as images. The time runs along the x-axis
# and the channels along the y-axis. The amplitudes are color coded so that
# the amplitudes from negative to positive translates to shift from blue to
# red. White means zero amplitude. You can use the ``cmap`` parameter to define
# the color map yourself. The accepted values include all matplotlib colormaps.
evoked_r_aud.plot_image(picks=picks)
###############################################################################
# Finally we plot the sensor data as a topographical view. In the simple case
# we plot only left auditory responses, and then we plot them all in the same
# figure for comparison. Click on the individual plots to open them bigger.
title = 'MNE sample data (condition : %s)'
evoked_l_aud.plot_topo(title=title % evoked_l_aud.comment)
colors = 'yellow', 'green', 'red', 'blue'
mne.viz.plot_evoked_topo(evoked, color=colors,
title=title % 'Left/Right Auditory/Visual')
###############################################################################
# Visualizing field lines in 3D
# -----------------------------
#
# We now compute the field maps to project MEG and EEG data to MEG helmet
# and scalp surface.
#
# To do this we'll need coregistration information. See
# :ref:`tut_forward` for more details.
#
# Here we just illustrate usage.
subjects_dir = data_path + '/subjects'
trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
maps = mne.make_field_map(evoked_l_aud, trans=trans_fname, subject='sample',
subjects_dir=subjects_dir, n_jobs=1)
# explore several points in time
field_map = evoked_l_aud.plot_field(maps, time=.1)
###############################################################################
# .. note::
# If trans_fname is set to None then only MEG estimates can be visualized.
| bsd-3-clause |
lpantano/bcbio-nextgen | bcbio/utils.py | 1 | 20352 | """Helpful utilities for building analysis pipelines.
"""
import gzip
import os
import tempfile
import time
import shutil
import contextlib
import itertools
import functools
import random
import ConfigParser
import collections
import fnmatch
import subprocess
import sys
import toolz as tz
import yaml
try:
from concurrent import futures
except ImportError:
try:
import futures
except ImportError:
futures = None
@contextlib.contextmanager
def cpmap(cores=1):
"""Configurable parallel map context manager.
Returns appropriate map compatible function based on configuration:
- Local single core (the default)
- Multiple local cores
"""
if int(cores) == 1:
yield itertools.imap
else:
if futures is None:
raise ImportError("concurrent.futures not available")
pool = futures.ProcessPoolExecutor(cores)
yield pool.map
pool.shutdown()
def map_wrap(f):
"""Wrap standard function to easily pass into 'map' processing.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return apply(f, *args, **kwargs)
return wrapper
def transform_to(ext):
"""
Decorator to create an output filename from an output filename with
the specified extension. Changes the extension, in_file is transformed
to a new type.
Takes functions like this to decorate:
f(in_file, out_dir=None, out_file=None) or,
f(in_file=in_file, out_dir=None, out_file=None)
examples:
@transform(".bam")
f("the/input/path/file.sam") ->
f("the/input/path/file.sam", out_file="the/input/path/file.bam")
@transform(".bam")
f("the/input/path/file.sam", out_dir="results") ->
f("the/input/path/file.sam", out_file="results/file.bam")
"""
def decor(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
out_file = kwargs.get("out_file", None)
if not out_file:
in_path = kwargs.get("in_file", args[0])
out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
safe_makedir(out_dir)
out_name = replace_suffix(os.path.basename(in_path), ext)
out_file = os.path.join(out_dir, out_name)
kwargs["out_file"] = out_file
if not file_exists(out_file):
out_file = f(*args, **kwargs)
return out_file
return wrapper
return decor
def filter_to(word):
"""
Decorator to create an output filename from an input filename by
adding a word onto the stem. in_file is filtered by the function
and the results are written to out_file. You would want to use
this over transform_to if you don't know the extension of the file
going in. This also memoizes the output file.
Takes functions like this to decorate:
f(in_file, out_dir=None, out_file=None) or,
f(in_file=in_file, out_dir=None, out_file=None)
examples:
@filter_to(".foo")
f("the/input/path/file.sam") ->
f("the/input/path/file.sam", out_file="the/input/path/file.foo.bam")
@filter_to(".foo")
f("the/input/path/file.sam", out_dir="results") ->
f("the/input/path/file.sam", out_file="results/file.foo.bam")
"""
def decor(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
out_file = kwargs.get("out_file", None)
if not out_file:
in_path = kwargs.get("in_file", args[0])
out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
safe_makedir(out_dir)
out_name = append_stem(os.path.basename(in_path), word)
out_file = os.path.join(out_dir, out_name)
kwargs["out_file"] = out_file
if not file_exists(out_file):
out_file = f(*args, **kwargs)
return out_file
return wrapper
return decor
def memoize_outfile(ext=None, stem=None):
"""
Memoization decorator.
See docstring for transform_to and filter_to for details.
"""
if ext:
return transform_to(ext)
if stem:
return filter_to(stem)
def safe_makedir(dname):
"""Make a directory if it doesn't exist, handling concurrent race conditions.
"""
if not dname:
return dname
num_tries = 0
max_tries = 5
while not os.path.exists(dname):
# we could get an error here if multiple processes are creating
# the directory at the same time. Grr, concurrency.
try:
os.makedirs(dname)
except OSError:
if num_tries > max_tries:
raise
num_tries += 1
time.sleep(2)
return dname
@contextlib.contextmanager
def chdir(new_dir):
"""Context manager to temporarily change to a new directory.
http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
"""
cur_dir = os.getcwd()
safe_makedir(new_dir)
os.chdir(new_dir)
try:
yield
finally:
os.chdir(cur_dir)
@contextlib.contextmanager
def tmpfile(*args, **kwargs):
"""Make a tempfile, safely cleaning up file descriptors on completion.
"""
(fd, fname) = tempfile.mkstemp(*args, **kwargs)
try:
yield fname
finally:
os.close(fd)
if os.path.exists(fname):
os.remove(fname)
def file_exists(fname):
"""Check if a file exists and is non-empty.
"""
try:
return fname and os.path.exists(fname) and os.path.getsize(fname) > 0
except OSError:
return False
def file_uptodate(fname, cmp_fname):
"""Check if a file exists, is non-empty and is more recent than cmp_fname.
"""
try:
return (file_exists(fname) and file_exists(cmp_fname) and
os.path.getmtime(fname) >= os.path.getmtime(cmp_fname))
except OSError:
return False
def create_dirs(config, names=None):
if names is None:
names = config["dir"].keys()
for dname in names:
d = config["dir"][dname]
safe_makedir(d)
def save_diskspace(fname, reason, config):
"""Overwrite a file in place with a short message to save disk.
This keeps files as a sanity check on processes working, but saves
disk by replacing them with a short message.
"""
if config["algorithm"].get("save_diskspace", False):
with open(fname, "w") as out_handle:
out_handle.write("File removed to save disk space: %s" % reason)
def read_galaxy_amqp_config(galaxy_config, base_dir):
"""Read connection information on the RabbitMQ server from Galaxy config.
"""
galaxy_config = add_full_path(galaxy_config, base_dir)
config = ConfigParser.ConfigParser()
config.read(galaxy_config)
amqp_config = {}
for option in config.options("galaxy_amqp"):
amqp_config[option] = config.get("galaxy_amqp", option)
return amqp_config
def add_full_path(dirname, basedir=None):
if basedir is None:
basedir = os.getcwd()
if not dirname.startswith("/"):
dirname = os.path.join(basedir, dirname)
return dirname
def splitext_plus(f):
"""Split on file extensions, allowing for zipped extensions.
"""
base, ext = os.path.splitext(f)
if ext in [".gz", ".bz2", ".zip"]:
base, ext2 = os.path.splitext(base)
ext = ext2 + ext
return base, ext
def remove_safe(f):
try:
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
except OSError:
pass
def file_plus_index(fname):
"""Convert a file name into the file plus required indexes.
"""
exts = {".vcf": ".idx", ".bam": ".bai", ".vcf.gz": ".tbi", ".bed.gz": ".tbi",
".fq.gz": ".gbi"}
ext = splitext_plus(fname)[-1]
if ext in exts:
return [fname, fname + exts[ext]]
else:
return [fname]
def copy_plus(orig, new):
"""Copy a fils, including biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)):
shutil.copyfile(orig + ext, new + ext)
def symlink_plus(orig, new):
"""Create relative symlinks and handle associated biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)):
with chdir(os.path.dirname(new)):
remove_safe(new + ext)
# Work around symlink issues on some filesystems. Randomly
# fail to symlink.
try:
os.symlink(os.path.relpath(orig + ext), os.path.basename(new + ext))
except OSError:
if not os.path.exists(new + ext) or not os.path.lexists(new + ext):
remove_safe(new + ext)
shutil.copyfile(orig + ext, new + ext)
orig_noext = splitext_plus(orig)[0]
new_noext = splitext_plus(new)[0]
for sub_ext in [".bai"]:
if os.path.exists(orig_noext + sub_ext) and not os.path.lexists(new_noext + sub_ext):
with chdir(os.path.dirname(new_noext)):
os.symlink(os.path.relpath(orig_noext + sub_ext), os.path.basename(new_noext + sub_ext))
def open_gzipsafe(f):
return gzip.open(f) if f.endswith(".gz") else open(f)
def append_stem(to_transform, word):
"""
renames a filename or list of filenames with 'word' appended to the stem
of each one:
example: append_stem("/path/to/test.sam", "_filtered") ->
"/path/to/test_filtered.sam"
"""
if is_sequence(to_transform):
return [append_stem(f, word) for f in to_transform]
elif is_string(to_transform):
(base, ext) = splitext_plus(to_transform)
return "".join([base, word, ext])
else:
raise ValueError("append_stem takes a single filename as a string or "
"a list of filenames to transform.")
def replace_suffix(to_transform, suffix):
"""
replaces the suffix on a filename or list of filenames
example: replace_suffix("/path/to/test.sam", ".bam") ->
"/path/to/test.bam"
"""
if is_sequence(to_transform):
transformed = []
for f in to_transform:
(base, _) = os.path.splitext(f)
transformed.append(base + suffix)
return transformed
elif is_string(to_transform):
(base, _) = os.path.splitext(to_transform)
return base + suffix
else:
raise ValueError("replace_suffix takes a single filename as a string or "
"a list of filenames to transform.")
# ## Functional programming
def partition_all(n, iterable):
"""Partition a list into equally sized pieces, including last smaller parts
http://stackoverflow.com/questions/5129102/python-equivalent-to-clojures-partition-all
"""
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, n))
if not chunk:
break
yield chunk
def robust_partition_all(n, iterable):
"""
replaces partition_all with a more robust version.
Workaround for a segfault in pybedtools when using a BedTool as an iterator:
https://github.com/daler/pybedtools/issues/88 for the discussion
"""
it = iter(iterable)
while True:
x = []
for _ in range(n):
try:
x.append(it.next())
except StopIteration:
yield x
# Omitting this StopIteration results in a segfault!
raise StopIteration
yield x
def partition(pred, iterable, tolist=False):
'Use a predicate to partition entries into false entries and true entries'
# partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
t1, t2 = itertools.tee(iterable)
ifalse = itertools.ifilterfalse(pred, t1)
itrue = itertools.ifilter(pred, t2)
if tolist:
return list(ifalse), list(itrue)
else:
return ifalse, itrue
# ## Dealing with configuration files
def merge_config_files(fnames):
"""Merge configuration files, preferring definitions in latter files.
"""
def _load_yaml(fname):
with open(fname) as in_handle:
config = yaml.load(in_handle)
return config
out = _load_yaml(fnames[0])
for fname in fnames[1:]:
cur = _load_yaml(fname)
for k, v in cur.iteritems():
if k in out and isinstance(out[k], dict):
out[k].update(v)
else:
out[k] = v
return out
def deepish_copy(org):
"""Improved speed deep copy for dictionaries of simple python types.
Thanks to Gregg Lind:
http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
"""
out = dict().fromkeys(org)
for k, v in org.iteritems():
if isinstance(v, dict):
out[k] = deepish_copy(v)
else:
try:
out[k] = v.copy() # dicts, sets
except AttributeError:
try:
out[k] = v[:] # lists, tuples, strings, unicode
except TypeError:
out[k] = v # ints
return out
def get_in(d, t, default=None):
"""
look up if you can get a tuple of values from a nested dictionary,
each item in the tuple a deeper layer
example: get_in({1: {2: 3}}, (1, 2)) -> 3
example: get_in({1: {2: 3}}, (2, 3)) -> {}
"""
return tz.get_in(t, d, default)
def flatten(l):
"""
flatten an irregular list of lists
example: flatten([[[1, 2, 3], [4, 5]], 6]) -> [1, 2, 3, 4, 5, 6]
lifted from: http://stackoverflow.com/questions/2158395/
"""
for el in l:
if isinstance(el, collections.Iterable) and not isinstance(el,
basestring):
for sub in flatten(el):
yield sub
else:
yield el
def is_sequence(arg):
"""
check if 'arg' is a sequence
example: arg([]) -> True
example: arg("lol") -> False
"""
return (not hasattr(arg, "strip") and
hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__"))
def is_pair(arg):
"""
check if 'arg' is a two-item sequence
"""
return is_sequence(arg) and len(arg) == 2
def is_string(arg):
return isinstance(arg, basestring)
def locate(pattern, root=os.curdir):
'''Locate all files matching supplied filename pattern in and below
supplied root directory.'''
for path, dirs, files in os.walk(os.path.abspath(root)):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(path, filename)
def itersubclasses(cls, _seen=None):
"""
snagged from: http://code.activestate.com/recipes/576949/
itersubclasses(cls)
Generator over all subclasses of a given class, in depth first order.
>>> list(itersubclasses(int)) == [bool]
True
>>> class A(object): pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL (new-style) classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)] #doctest: +ELLIPSIS
['type', ...'tuple', ...]
"""
if not isinstance(cls, type):
raise TypeError('itersubclasses must be called with '
'new-style classes, not %.100r' % cls)
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in subs:
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def replace_directory(out_files, dest_dir):
"""
change the output directory to dest_dir
can take a string (single file) or a list of files
"""
if is_sequence(out_files):
filenames = map(os.path.basename, out_files)
return [os.path.join(dest_dir, x) for x in filenames]
elif is_string(out_files):
return os.path.join(dest_dir, os.path.basename(out_files))
else:
raise ValueError("in_files must either be a sequence of filenames "
"or a string")
def which(program):
""" returns the path to an executable or None if it can't be found"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def reservoir_sample(stream, num_items, item_parser=lambda x: x):
"""
samples num_items from the stream keeping each with equal probability
"""
kept = []
for index, item in enumerate(stream):
if index < num_items:
kept.append(item_parser(item))
else:
r = random.randint(0, index)
if r < num_items:
kept[r] = item_parser(item)
return kept
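# Illustrative usage (hypothetical file name): keep 100 random lines from a file
# in a single pass without loading it all into memory:
# sampled = reservoir_sample(open("reads.fastq"), 100)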
def compose(f, g):
return lambda x: f(g(x))
def dictapply(d, fn):
"""
apply a function to all non-dict values in a dictionary
"""
for k, v in d.items():
if isinstance(v, dict):
v = dictapply(v, fn)
else:
d[k] = fn(v)
return d
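# Example: dictapply({"a": 1, "b": {"c": 2}}, lambda x: x * 10)
# returns {"a": 10, "b": {"c": 20}} (nested dicts are updated in place)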
def Rscript_cmd():
"""Retrieve path to locally installed Rscript or first in PATH.
Prefers Rscript version installed via conda to a system version.
"""
rscript = which(os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "Rscript"))
if rscript:
return rscript
else:
return which("Rscript")
def R_sitelib():
"""Retrieve the R site-library installed with the bcbio installer.
"""
from bcbio import install
return os.path.join(install.get_defaults().get("tooldir", "/usr/local"),
"lib", "R", "site-library")
def R_package_path(package):
"""
return the path to an installed R package
"""
local_sitelib = R_sitelib()
rscript = Rscript_cmd()
cmd = """{rscript} -e '.libPaths(c("{local_sitelib}")); find.package("{package}")'"""
try:
output = subprocess.check_output(cmd.format(**locals()), shell=True)
except subprocess.CalledProcessError, e:
return None
for line in output.split("\n"):
if "[1]" not in line:
continue
dirname = line.split("[1]")[1].replace("\"", "").strip()
if os.path.exists(dirname):
return dirname
return None
def is_gzipped(fname):
_, ext = os.path.splitext(fname)
return ext in [".gz", "gzip"]
def is_bzipped(fname):
_, ext = os.path.splitext(fname)
return ext in [".bz2", "bzip2"]
def open_possible_gzip(fname, flag="r"):
if is_gzipped(fname):
if "b" not in flag:
flag += "b"
return gzip.open(fname, flag)
else:
return open(fname, flag)
def filter_missing(xs):
"""
remove items from a list if they evaluate to False
"""
return filter(lambda x: x, xs)
def rbind(dfs):
"""
acts like rbind for pandas dataframes
"""
if len(dfs) == 1:
return dfs[0]
df = dfs[0]
for d in dfs[1:]:
df = df.append(d)
return df
def max_command_length():
"""
get the maximum length of the command line, in bytes, defaulting
to a conservative number if not set
http://www.in-ulm.de/~mascheck/various/argmax/
"""
DEFAULT_MAX_LENGTH = 150000 # lowest seen so far is 200k
try:
arg_max = os.sysconf('SC_ARG_MAX')
env_lines = len(os.environ) * 4
env_chars = sum([len(x) + len(y) for x, y in os.environ.iteritems()])
arg_length = arg_max - env_lines - 2048
except ValueError:
arg_length = DEFAULT_MAX_LENGTH
return arg_length if arg_length > 0 else DEFAULT_MAX_LENGTH
| mit |
nhmc/xastropy | xastropy/PH136/exercises/solarspec_exerc.py | 7 | 5163 | """Module to perform the Solar Spectrum exercise for PH136.
Currently tuned to Lick spectra from Jan 2013
"""
# Import libraries
import numpy as np
from astropy.io import fits
from matplotlib import pyplot
import pdb
#####################################
# Show a 1D spectrum
# Useful to eyeball the pixel values of a few key lines
# import xastropy.PH136.exercises.solarspec_exerc as ssp
# reload(ssp)
# ssp.plot_spec('b1014.fits')
def plot_spec(fits_fil,prow=None,give_spec=False, noplot=False):
from astropy.io.fits import getdata
# Read
arr,head = getdata(fits_fil,0,header=True)
siz = arr.shape
if prow == None:
prow = int(siz[0]/2.)
# Define the spectrum
spec = arr[prow,:]
npix = len(spec)
#pdb.set_trace()
# Plot
if noplot:
pyplot.clf()
pyplot.plot(np.arange(npix), spec)
pyplot.show()
if give_spec:
return spec
else:
return
#pdb.set_trace()
################################################
# Define a Gaussian plus a floor offset
def gauss_off(x, Z, A, x0, sigma):
return Z + A*np.exp(- (x-x0)**2 / (2.*sigma**2) )
################################################
# Fit a Gaussian (plus a constant offset) to an arc line near a guessed pixel position
def gaussfit_line(xval, yval, xguess, xrng=15.,debug=False):
from scipy.optimize import curve_fit
import xastropy.PH136.exercises.solarspec_exerc as ssp
# Grab the region to fit
gdx = np.where( np.fabs( xval-xguess ) < xrng )
newx = xval[gdx]
newy = yval[gdx]
# Guess
Aguess = np.max(newy)
sguess = 2.
Z = np.median(newy)
pguess = [Z, Aguess, xguess, sguess]
# Fit
popt, pcov = curve_fit(ssp.gauss_off, newx, newy, p0=pguess)
# Debug
if debug:
pyplot.clf()
#data
pyplot.plot(newx, newy, 'o')
#curve
xdum = np.linspace(np.min(newx), np.max(newx), num=300)
Z,A,x0,sigma = popt
ydum = Z + A*np.exp(- (xdum-x0)**2 / (2.*sigma**2) )
# plot
pyplot.plot(xdum, ydum)
pyplot.show()
return popt[2]
################################################
# Fit a wavelength solution using hard-coded guesses, and plot
# import xastropy.PH136.exercises.solarspec_exerc as ssp
# reload(ssp)
# fit = ssp.fit_lines(fits_fil='b1014.fits')
def fit_lines(fits_fil=None, xquery=None,show_plot=False, plot_spec=False):
import xastropy.PH136.exercises.solarspec_exerc as ssp
from astropy.io.fits import getdata
if fits_fil == None:
fits_fil = 'b1014.fits'
spec = ssp.plot_spec(fits_fil, give_spec=True)
npix = len(spec)
xpix = np.arange(npix)
# Generate the arrays
wav_val = np.array( [5085.82, 4799.92, 4358.33, 3466.55])
guess_pix_val = np.array( [1930.5, 1664.72, 1241.46, 316.8])
# Centroid those lines!
nlin = len(guess_pix_val)
pix_val = np.zeros(nlin)
ii=0
for gpix in guess_pix_val:
pix_val[ii] = ssp.gaussfit_line(xpix,spec,gpix)#,debug=True)
ii += 1
#pdb.set_trace()
# Fit
fit = np.polyfit(pix_val, wav_val, 2)
print 'Fit (dlam, w0): ', fit
# Setup for plot
pv = np.poly1d(fit)
xval = np.linspace(1., 2000, 100)
yval = pv(xval)
# Plot?
if show_plot:
pyplot.clf()
pyplot.plot(pix_val, wav_val, 'o')
pyplot.plot(xval, yval)
pyplot.xlabel('pixel')
pyplot.ylabel('Wave (Ang)')
#pyplot.show()
pyplot.savefig('arclin_fit.pdf')
# Plot the spectrum
if plot_spec and (fits_fil != None):
spec = ssp.plot_spec(fits_fil, give_spec=True, noplot=True)
npix = len(spec)
xval = np.arange(npix)
wave = pv(xval)
pyplot.clf()
pyplot.plot(wave, spec,drawstyle="steps-mid", ls='-')
pyplot.xlim([3000., 5500])
pyplot.xlabel('Wavelength (Ang)')
pyplot.ylabel('Counts')
pyplot.savefig('arclin_spec.pdf')
# Print a value
if xquery != None:
wquery = pv(xquery)
print 'Wavelength for pixel = ', xquery, ' is wave = ', wquery
return fit
################################################
# Extract and show the solar spectrum
# import xastropy.PH136.exercises.solarspec_exerc as ssp
# reload(ssp)
# ssp.sol_spec()
def sol_spec(fits_fil=None, xquery=None,show_plot=False, plot_spec=False, arc_fil=None):
import xastropy.PH136.exercises.solarspec_exerc as ssp
# Get wavelength solution
if arc_fil == None:
arc_fil = 'b1014.fits'
fit = ssp.fit_lines(fits_fil=arc_fil)
pv = np.poly1d(fit)
# Read Solar spectrum
if fits_fil == None:
fits_fil = 'b1029.fits'
spec = ssp.plot_spec(fits_fil, give_spec=True)
npix = len(spec)
xpix = np.arange(npix)
wave = pv(xpix)
# Plot
pyplot.clf()
pyplot.plot(wave, spec,drawstyle="steps-mid", ls='-')
pyplot.xlim([3000., 5500])
pyplot.xlabel('Wavelength (Ang)')
pyplot.ylabel('Counts')
# Ca lines
pyplot.axvline(3933.68, color='r')
pyplot.axvline(3968.47, color='r')
pyplot.show()
# Ca H+K
# 3955.5, 3991.
| bsd-3-clause |
johnson1228/pymatgen | pymatgen/io/lammps/data.py | 1 | 45831 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
from collections import OrderedDict
from io import StringIO
import itertools
import re
import warnings
import numpy as np
import pandas as pd
from monty.json import MSONable
from ruamel.yaml import YAML
from six import string_types
from pymatgen.util.io_utils import clean_lines
from pymatgen.core.structure import SiteCollection
from pymatgen import Molecule, Element, Lattice, Structure
"""
This module implements a core class LammpsData for generating/parsing
LAMMPS data file, and other bridging classes to build LammpsData from
molecules.
Only point particle styles are supported for now (atom_style in angle,
atomic, bond, charge, full and molecular only). See the pages below for
more info.
http://lammps.sandia.gov/doc/atom_style.html
http://lammps.sandia.gov/doc/read_data.html
"""
__author__ = "Kiran Mathew, Zhi Deng"
__email__ = "[email protected], [email protected]"
__credits__ = "Brandon Wood"
SECTION_KEYWORDS = {"atom": ["Atoms", "Velocities", "Masses",
"Ellipsoids", "Lines", "Triangles", "Bodies"],
"topology": ["Bonds", "Angles", "Dihedrals", "Impropers"],
"ff": ["Pair Coeffs", "PairIJ Coeffs", "Bond Coeffs",
"Angle Coeffs", "Dihedral Coeffs",
"Improper Coeffs"],
"class2": ["BondBond Coeffs", "BondAngle Coeffs",
"MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs", "AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs", "AngleAngle Coeffs"]}
CLASS2_KEYWORDS = {"Angle Coeffs": ["BondBond Coeffs", "BondAngle Coeffs"],
"Dihedral Coeffs": ["MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs",
"AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs"],
"Improper Coeffs": ["AngleAngle Coeffs"]}
SECTION_HEADERS = {"Masses": ["mass"],
"Velocities": ["vx", "vy", "vz"],
"Bonds": ["type", "atom1", "atom2"],
"Angles": ["type", "atom1", "atom2", "atom3"],
"Dihedrals": ["type", "atom1", "atom2", "atom3", "atom4"],
"Impropers": ["type", "atom1", "atom2", "atom3", "atom4"]}
ATOMS_HEADERS = {"angle": ["molecule-ID", "type", "x", "y", "z"],
"atomic": ["type", "x", "y", "z"],
"bond": ["molecule-ID", "type", "x", "y", "z"],
"charge": ["type", "q", "x", "y", "z"],
"full": ["molecule-ID", "type", "q", "x", "y", "z"],
"molecular": ["molecule-ID", "type", "x", "y", "z"]}
class LammpsData(MSONable):
"""
Object for representing the data in a LAMMPS data file.
"""
def __init__(self, masses, atoms, box_bounds, box_tilt=None,
velocities=None, force_field=None, topology=None,
atom_style="full"):
"""
This is a low level constructor designed to work with parsed
data or other bridging objects (ForceField and Topology). Not
recommended to use directly.
Args:
masses (pandas.DataFrame): DataFrame with one column
["mass"] for Masses section.
atoms (pandas.DataFrame): DataFrame with multiple columns
for Atoms section. Column names vary with atom_style.
box_bounds: A (3, 2) array/list of floats setting the
boundaries of simulation box.
box_tilt: A (3,) array/list of floats setting the tilt of
simulation box. Default to None, i.e., use an
orthogonal box.
velocities (pandas.DataFrame): DataFrame with three columns
["vx", "vy", "vz"] for Velocities section. Optional
with default to None. If not None, its index should be
consistent with atoms.
force_field (dict): Data for force field sections. Optional
with default to None. Only keywords in force field and
class 2 force field are valid keys, and each value is a
DataFrame.
topology (dict): Data for topology sections. Optional with
default to None. Only keywords in topology are valid
keys, and each value is a DataFrame.
atom_style (str): Output atom_style. Default to "full".
"""
bounds_arr = np.array(box_bounds)
bounds_shape = bounds_arr.shape
assert bounds_shape == (3, 2), \
"Expecting a (3, 2) array for box_bounds," \
" got {}".format(bounds_shape)
box_bounds = bounds_arr.tolist()
if box_tilt is not None:
tilt_arr = np.array(box_tilt)
tilt_shape = tilt_arr.shape
assert tilt_shape == (3,),\
"Expecting a (3,) array for box_tilt," \
" got {}".format(tilt_shape)
box_tilt = tilt_arr.tolist()
if velocities is not None:
assert len(velocities) == len(atoms),\
"Inconsistency found between atoms and velocities"
if force_field:
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
force_field = {k: v for k, v in force_field.items()
if k in all_ff_kws}
if topology:
topology = {k: v for k, v in topology.items()
if k in SECTION_KEYWORDS["topology"]}
self.masses = masses
self.atoms = atoms
self.box_bounds = box_bounds
self.box_tilt = box_tilt
self.velocities = velocities
self.force_field = force_field
self.topology = topology
self.atom_style = atom_style
def __str__(self):
return self.get_string()
@property
def structure(self):
"""
Export a periodic structure object representing the simulation box.
Return:
A pymatgen structure object
"""
masses = self.masses
atoms = self.atoms
atoms["molecule-ID"] = 1
box_bounds = np.array(self.box_bounds)
box_tilt = self.box_tilt if self.box_tilt else [0.0] * 3
ld_copy = self.__class__(masses, atoms, box_bounds, box_tilt)
_, topologies = ld_copy.disassemble()
molecule = topologies[0].sites
coords = molecule.cart_coords - box_bounds[:, 0]
species = molecule.species
matrix = np.diag(box_bounds[:, 1] - box_bounds[:, 0])
matrix[1, 0] = box_tilt[0]
matrix[2, 0] = box_tilt[1]
matrix[2, 1] = box_tilt[2]
latt = Lattice(matrix)
site_properties = None if self.velocities is None \
else {"velocities": self.velocities.values}
return Structure(latt, species, coords, coords_are_cartesian=True,
site_properties=site_properties)
def get_string(self, distance=6, velocity=8, charge=3):
"""
Returns the string representation of LammpsData, essentially
the string to be written to a file.
Args:
distance (int): No. of significant figures to output for
box settings (bounds and tilt) and atomic coordinates.
Default to 6.
velocity (int): No. of significant figures to output for
velocities. Default to 8.
charge (int): No. of significant figures to output for
charges. Default to 3.
Returns:
String representation
"""
file_template = """Generated by pymatgen.io.lammps.data.LammpsData
{stats}
{box}
{body}
"""
box_ph = "{:.%df}" % distance
box_lines = []
for bound, d in zip(self.box_bounds, "xyz"):
fillers = bound + [d] * 2
bound_format = " ".join([box_ph] * 2 + [" {}lo {}hi"])
box_lines.append(bound_format.format(*fillers))
if self.box_tilt:
tilt_format = " ".join([box_ph] * 3 + [" xy xz yz"])
box_lines.append(tilt_format.format(*self.box_tilt))
box = "\n".join(box_lines)
body_dict = OrderedDict()
body_dict["Masses"] = self.masses
types = OrderedDict()
types["atom"] = len(self.masses)
if self.force_field:
all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
ff_kws = [k for k in all_ff_kws if k in self.force_field]
for kw in ff_kws:
body_dict[kw] = self.force_field[kw]
if kw in SECTION_KEYWORDS["ff"][2:]:
types[kw.lower()[:-7]] = len(self.force_field[kw])
body_dict["Atoms"] = self.atoms
counts = OrderedDict()
counts["atoms"] = len(self.atoms)
if self.velocities is not None:
body_dict["Velocities"] = self.velocities
if self.topology:
for kw in SECTION_KEYWORDS["topology"]:
if kw in self.topology:
body_dict[kw] = self.topology[kw]
counts[kw.lower()] = len(self.topology[kw])
all_stats = list(counts.values()) + list(types.values())
stats_template = "{:>%d} {}" % len(str(max(all_stats)))
count_lines = [stats_template.format(v, k) for k, v in counts.items()]
type_lines = [stats_template.format(v, k + " types")
for k, v in types.items()]
stats = "\n".join(count_lines + [""] + type_lines)
map_coords = lambda q: ("{:.%df}" % distance).format(q)
map_velos = lambda q: ("{:.%df}" % velocity).format(q)
map_charges = lambda q: ("{:.%df}" % charge).format(q)
formatters = {"x": map_coords, "y": map_coords, "z": map_coords,
"vx": map_velos, "vy": map_velos, "vz": map_velos,
"q": map_charges}
section_template = "{kw}\n\n{df}\n"
parts = []
for k, v in body_dict.items():
index = True if k != "PairIJ Coeffs" else False
df_string = v.to_string(header=False, formatters=formatters,
index_names=False, index=index)
parts.append(section_template.format(kw=k, df=df_string))
body = "\n".join(parts)
return file_template.format(stats=stats, box=box, body=body)
def write_file(self, filename, distance=6, velocity=8, charge=3):
"""
Writes LammpsData to file.
Args:
filename (str): Filename.
distance (int): No. of decimal places to output for
box settings (bounds and tilt) and atomic coordinates.
Default to 6.
velocity (int): No. of decimal places to output for
velocities. Default to 8.
charge (int): No. of decimal places to output for
charges. Default to 3.
"""
with open(filename, "w") as f:
f.write(self.get_string(distance=distance, velocity=velocity,
charge=charge))
def disassemble(self, atom_labels=None, guess_element=True,
ff_label="ff_map"):
"""
Breaks down LammpsData to ForceField and a series of Topology.
RESTRICTIONS APPLIED:
1. No complex force field defined on anything other than
atom types, i.e., the same type or equivalent types of
topology must not have more than one set of coefficients.
2. No intermolecular topologies (with atoms from different
molecule-ID) since a Topology object includes data for ONE
molecule or structure only.
Args:
atom_labels ([str]): List of strings (must be different
from one another) for labelling each atom type found in
Masses section. Default to None, where the labels are
automatically added based on either element guess or
dummy specie assignment.
guess_element (bool): Whether to guess the element based on
its atomic mass. Default to True, otherwise dummy
species "Qa", "Qb", ... will be assigned to various
atom types. The guessed or assigned elements will be
reflected on atom labels if atom_labels is None, as
well as on the species of molecule in each Topology.
ff_label (str): Site property key for labeling atoms of
different types. Default to "ff_map".
Returns:
ForceField, [Topology]
"""
atoms_df = self.atoms.copy()
if "nx" in atoms_df.columns:
box_dim = np.ptp(self.box_bounds, axis=1)
atoms_df[["x", "y", "z"]] += atoms_df[["nx", "ny", "nz"]].values \
* box_dim
atoms_df = pd.concat([atoms_df, self.velocities], axis=1)
mids = atoms_df.get("molecule-ID")
if mids is None:
unique_mids = [1]
data_by_mols = {1: {"Atoms": atoms_df}}
else:
unique_mids = np.unique(mids)
data_by_mols = {}
for k in unique_mids:
df = atoms_df[atoms_df["molecule-ID"] == k]
data_by_mols[k] = {"Atoms": df}
masses = self.masses.copy()
masses["label"] = atom_labels
unique_masses = np.unique(masses["mass"])
if guess_element:
ref_masses = sorted([el.atomic_mass.real for el in Element])
diff = np.abs(np.array(ref_masses) - unique_masses[:, None])
atomic_numbers = np.argmin(diff, axis=1) + 1
symbols = [Element.from_Z(an).symbol for an in atomic_numbers]
else:
symbols = ["Q%s" % a for a in
map(chr, range(97, 97 + len(unique_masses)))]
for um, s in zip(unique_masses, symbols):
masses.loc[masses["mass"] == um, "element"] = s
if atom_labels is None: # add unique labels based on elements
for el, vc in masses["element"].value_counts().iteritems():
masses.loc[masses["element"] == el, "label"] = \
["%s%d" % (el, c) for c in range(1, vc + 1)]
assert masses["label"].nunique(dropna=False) == len(masses), \
"Expecting unique atom label for each type"
mass_info = [tuple([r["label"], r["mass"]])
for _, r in masses.iterrows()]
nonbond_coeffs, topo_coeffs = None, None
if self.force_field:
if "PairIJ Coeffs" in self.force_field:
nbc = self.force_field["PairIJ Coeffs"]
nbc = nbc.sort_values(["id1", "id2"]).drop(["id1", "id2"], axis=1)
nonbond_coeffs = [list(t) for t in nbc.itertuples(False, None)]
elif "Pair Coeffs" in self.force_field:
nbc = self.force_field["Pair Coeffs"].sort_index()
nonbond_coeffs = [list(t) for t in nbc.itertuples(False, None)]
topo_coeffs = {k: [] for k in SECTION_KEYWORDS["ff"][2:]
if k in self.force_field}
for kw in topo_coeffs.keys():
class2_coeffs = {k: list(v.itertuples(False, None))
for k, v in self.force_field.items()
if k in CLASS2_KEYWORDS.get(kw, [])}
ff_df = self.force_field[kw]
for t in ff_df.itertuples(True, None):
d = {"coeffs": list(t[1:]), "types": []}
if class2_coeffs:
d.update({k: list(v[t[0] - 1])
for k, v in class2_coeffs.items()})
topo_coeffs[kw].append(d)
if self.topology:
label_topo = lambda t: tuple(masses.loc[atoms_df.loc[t, "type"],
"label"])
for k, v in self.topology.items():
ff_kw = k[:-1] + " Coeffs"
for topo in v.itertuples(False, None):
topo_idx = topo[0] - 1
indices = topo[1:]
mids = atoms_df.loc[indices, "molecule-ID"].unique()
assert len(mids) == 1, \
"Do not support intermolecular topology formed " \
"by atoms with different molecule-IDs"
label = label_topo(indices)
topo_coeffs[ff_kw][topo_idx]["types"].append(label)
if data_by_mols[mids[0]].get(k):
data_by_mols[mids[0]][k].append(indices)
else:
data_by_mols[mids[0]][k] = [indices]
if topo_coeffs:
for v in topo_coeffs.values():
for d in v:
d["types"] = list(set(d["types"]))
ff = ForceField(mass_info=mass_info, nonbond_coeffs=nonbond_coeffs,
topo_coeffs=topo_coeffs)
topo_list = []
for mid in unique_mids:
data = data_by_mols[mid]
atoms = data["Atoms"]
shift = min(atoms.index)
type_ids = atoms["type"]
species = masses.loc[type_ids, "element"]
labels = masses.loc[type_ids, "label"]
coords = atoms[["x", "y", "z"]]
m = Molecule(species.values, coords.values,
site_properties={ff_label: labels.values})
charges = atoms.get("q")
velocities = atoms[["vx", "vy", "vz"]] if "vx" in atoms.columns \
else None
topologies = {}
for kw in SECTION_KEYWORDS["topology"]:
if data.get(kw):
topologies[kw] = (np.array(data[kw]) - shift).tolist()
topologies = None if not topologies else topologies
topo_list.append(Topology(sites=m, ff_label=ff_label,
charges=charges, velocities=velocities,
topologies=topologies))
return ff, topo_list
@classmethod
def from_file(cls, filename, atom_style="full", sort_id=False):
"""
Constructor that parses a file.
Args:
filename (str): Filename to read.
atom_style (str): Associated atom_style. Default to "full".
sort_id (bool): Whether to sort each section by id. Default to
False.
"""
with open(filename) as f:
lines = f.readlines()
kw_pattern = r"|".join(itertools.chain(*SECTION_KEYWORDS.values()))
section_marks = [i for i, l in enumerate(lines)
if re.search(kw_pattern, l)]
parts = np.split(lines, section_marks)
float_group = r"([0-9eE.+-]+)"
header_pattern = dict()
header_pattern["counts"] = r"^\s*(\d+)\s+([a-zA-Z]+)$"
header_pattern["types"] = r"^\s*(\d+)\s+([a-zA-Z]+)\s+types$"
header_pattern["bounds"] = r"^\s*{}$".format(r"\s+".join(
[float_group] * 2 + [r"([xyz])lo \3hi"]))
header_pattern["tilt"] = r"^\s*{}$".format(r"\s+".join(
[float_group] * 3 + ["xy xz yz"]))
header = {"counts": {}, "types": {}}
bounds = {}
for l in clean_lines(parts[0][1:]): # skip the 1st line
match = None
for k, v in header_pattern.items():
match = re.match(v, l)
if match:
break
else:
continue
if match and k in ["counts", "types"]:
header[k][match.group(2)] = int(match.group(1))
elif match and k == "bounds":
g = match.groups()
bounds[g[2]] = [float(i) for i in g[:2]]
elif match and k == "tilt":
header["tilt"] = [float(i) for i in match.groups()]
header["bounds"] = [bounds.get(i, [-0.5, 0.5]) for i in "xyz"]
def parse_section(sec_lines):
title_info = sec_lines[0].split("#", 1)
kw = title_info[0].strip()
sio = StringIO("".join(sec_lines[2:])) # skip the 2nd line
df = pd.read_csv(sio, header=None, comment="#",
delim_whitespace=True)
if kw.endswith("Coeffs") and not kw.startswith("PairIJ"):
names = ["id"] + ["coeff%d" % i
for i in range(1, df.shape[1])]
elif kw == "PairIJ Coeffs":
names = ["id1", "id2"] + ["coeff%d" % i
for i in range(1, df.shape[1] - 1)]
df.index.name = None
elif kw in SECTION_HEADERS:
names = ["id"] + SECTION_HEADERS[kw]
elif kw == "Atoms":
names = ["id"] + ATOMS_HEADERS[atom_style]
if df.shape[1] == len(names):
pass
elif df.shape[1] == len(names) + 3:
names += ["nx", "ny", "nz"]
else:
raise ValueError("Format in Atoms section inconsistent"
" with atom_style %s" % atom_style)
else:
raise NotImplementedError("Parser for %s section"
" not implemented" % kw)
df.columns = names
if sort_id:
sort_by = "id" if kw != "PairIJ Coeffs" else ["id1", "id2"]
df.sort_values(sort_by, inplace=True)
if "id" in df.columns:
df.set_index("id", drop=True, inplace=True)
df.index.name = None
return kw, df
err_msg = "Bad LAMMPS data format where "
body = {}
seen_atoms = False
for part in parts[1:]:
name, section = parse_section(part)
if name == "Atoms":
seen_atoms = True
if name in ["Velocities"] + SECTION_KEYWORDS["topology"] and \
not seen_atoms: # Atoms must appear earlier than these
raise RuntimeError(err_msg + "%s section appears before"
" Atoms section" % name)
body.update({name: section})
err_msg += "Nos. of {} do not match between header and {} section"
assert len(body["Masses"]) == header["types"]["atom"], \
err_msg.format("atom types", "Masses")
atom_sections = ["Atoms", "Velocities"] \
if "Velocities" in body else ["Atoms"]
for s in atom_sections:
assert len(body[s]) == header["counts"]["atoms"], \
err_msg.format("atoms", s)
for s in SECTION_KEYWORDS["topology"]:
if header["counts"].get(s.lower(), 0) > 0:
assert len(body[s]) == header["counts"][s.lower()], \
err_msg.format(s.lower(), s)
items = {k.lower(): body[k] for k in ["Masses", "Atoms"]}
items["box_bounds"] = header["bounds"]
items["box_tilt"] = header.get("tilt")
items["velocities"] = body.get("Velocities")
ff_kws = [k for k in body if k
in SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]]
items["force_field"] = {k: body[k] for k in ff_kws} if ff_kws \
else None
topo_kws = [k for k in body if k in SECTION_KEYWORDS["topology"]]
items["topology"] = {k: body[k] for k in topo_kws} \
if topo_kws else None
items["atom_style"] = atom_style
return cls(**items)
@classmethod
def from_ff_and_topologies(cls, ff, topologies, box_bounds, box_tilt=None,
atom_style="full"):
"""
Constructor building LammpsData from a ForceField object and a
list of Topology objects. Intermolecular topologies are not
supported, since a Topology object includes data for ONE
molecule or structure only.
Args:
ff (ForceField): ForceField object with data for Masses and
force field sections.
topologies ([Topology]): List of Topology objects with data
for Atoms, Velocities and topology sections.
box_bounds: A (3, 2) array/list of floats setting the
boundaries of simulation box.
box_tilt: A (3,) array/list of floats setting the tilt of
simulation box. Default to None, i.e., use an
orthogonal box.
atom_style (str): Output atom_style. Default to "full".
"""
atom_types = set.union(*[t.species for t in topologies])
assert atom_types.issubset(ff.maps["Atoms"].keys()),\
"Unknown atom type found in topologies"
items = dict(box_bounds=box_bounds, box_tilt=box_tilt,
atom_style=atom_style, masses=ff.masses,
force_field=ff.force_field)
mol_ids, charges, coords, labels = [], [], [], []
v_collector = [] if topologies[0].velocities else None
topo_collector = {"Bonds": [], "Angles": [], "Dihedrals": [],
"Impropers": []}
topo_labels = {"Bonds": [], "Angles": [], "Dihedrals": [],
"Impropers": []}
for i, topo in enumerate(topologies):
if topo.topologies:
shift = len(labels)
for k, v in topo.topologies.items():
topo_collector[k].append(np.array(v) + shift + 1)
topo_labels[k].extend([tuple([topo.type_by_sites[j]
for j in t]) for t in v])
if isinstance(v_collector, list):
v_collector.append(topo.velocities)
mol_ids.extend([i + 1] * len(topo.sites))
labels.extend(topo.type_by_sites)
coords.append(topo.sites.cart_coords)
q = [0.0] * len(topo.sites) if not topo.charges else topo.charges
charges.extend(q)
atoms = pd.DataFrame(np.concatenate(coords), columns=["x", "y", "z"])
atoms["molecule-ID"] = mol_ids
atoms["q"] = charges
atoms["type"] = list(map(ff.maps["Atoms"].get, labels))
atoms.index += 1
atoms = atoms[ATOMS_HEADERS[atom_style]]
velocities = None
if v_collector:
velocities = pd.DataFrame(np.concatenate(v_collector),
columns=SECTION_HEADERS["Velocities"])
velocities.index += 1
topology = {k: None for k, v in topo_labels.items() if len(v) > 0}
for k in topology:
df = pd.DataFrame(np.concatenate(topo_collector[k]),
columns=SECTION_HEADERS[k][1:])
df["type"] = list(map(ff.maps[k].get, topo_labels[k]))
if any(pd.isnull(df["type"])): # Throw away undefined topologies
warnings.warn("Undefined %s detected and removed" % k.lower())
df.dropna(subset=["type"], inplace=True)
df.reset_index(drop=True, inplace=True)
df.index += 1
topology[k] = df[SECTION_HEADERS[k]]
topology = {k: v for k, v in topology.items() if not v.empty}
items.update({"atoms": atoms, "velocities": velocities,
"topology": topology})
return cls(**items)
@classmethod
def from_dict(cls, d):
decode_df = lambda s: pd.read_json(s, orient="split")
items = dict()
items["masses"] = decode_df(d["masses"])
items["atoms"] = decode_df(d["atoms"])
items["box_bounds"] = d["box_bounds"]
items["box_tilt"] = d["box_tilt"]
items["atom_style"] = d["atom_style"]
velocities = d["velocities"]
if velocities:
velocities = decode_df(velocities)
items["velocities"] = velocities
force_field = d["force_field"]
if force_field:
force_field = {k: decode_df(v) for k, v in force_field.items()}
items["force_field"] = force_field
topology = d["topology"]
if topology:
topology = {k: decode_df(v) for k, v in topology.items()}
items["topology"] = topology
return cls(**items)
def as_dict(self):
encode_df = lambda df: df.to_json(orient="split")
d = dict()
d["@module"] = self.__class__.__module__
d["class"] = self.__class__.__name__
d["masses"] = encode_df(self.masses)
d["atoms"] = encode_df(self.atoms)
d["box_bounds"] = self.box_bounds
d["box_tilt"] = self.box_tilt
d["atom_style"] = self.atom_style
d["velocities"] = None if self.velocities is None \
else encode_df(self.velocities)
d["force_field"] = None if not self.force_field \
else {k: encode_df(v) for k, v in self.force_field.items()}
d["topology"] = None if not self.topology \
else {k: encode_df(v) for k, v in self.topology.items()}
return d
class Topology(MSONable):
"""
Class carrying most data in Atoms, Velocities and molecular
topology sections for ONE SINGLE Molecule or Structure
object, or a plain list of Sites.
"""
def __init__(self, sites, ff_label=None, charges=None, velocities=None,
topologies=None):
"""
Args:
sites ([Site] or SiteCollection): A group of sites in a
list or as a Molecule/Structure.
ff_label (str): Site property key for labeling atoms of
different types. Default to None, i.e., use
site.species_string.
charges ([q, ...]): Charge of each site in a (n,)
array/list, where n is the No. of sites. Default to
None, i.e., search site property for charges.
velocities ([[vx, vy, vz], ...]): Velocity of each site in
a (n, 3) array/list, where n is the No. of sites.
Default to None, i.e., search site property for
velocities.
topologies (dict): Bonds, angles, dihedrals and improper
dihedrals defined by site indices. Default to None,
i.e., no additional topology. All four valid keys
listed below are optional.
{
"Bonds": [[i, j], ...],
"Angles": [[i, j, k], ...],
"Dihedrals": [[i, j, k, l], ...],
"Impropers": [[i, j, k, l], ...]
}
"""
if not isinstance(sites, SiteCollection):
sites = Molecule.from_sites(sites)
if ff_label:
type_by_sites = sites.site_properties.get(ff_label)
else:
type_by_sites = [site.species_string for site in sites]
# search for site properties if not overridden
if charges is None:
charges = sites.site_properties.get("charge")
if velocities is None:
velocities = sites.site_properties.get("velocities")
# validate shape
if charges is not None:
charge_arr = np.array(charges)
assert charge_arr.shape == (len(sites),),\
"Wrong format for charges"
charges = charge_arr.tolist()
if velocities is not None:
velocities_arr = np.array(velocities)
assert velocities_arr.shape == (len(sites), 3), \
"Wrong format for velocities"
velocities = velocities_arr.tolist()
if topologies:
topologies = {k: v for k, v in topologies.items()
if k in SECTION_KEYWORDS["topology"]}
self.sites = sites
self.ff_label = ff_label
self.charges = charges
self.velocities = velocities
self.topologies = topologies
self.type_by_sites = type_by_sites
self.species = set(type_by_sites)
@classmethod
def from_bonding(cls, molecule, bond=True, angle=True, dihedral=True,
ff_label=None, charges=None, velocities=None, tol=0.1):
"""
Another constructor that creates an instance from a molecule.
Covalent bonds and other bond-based topologies (angles and
dihedrals) can be automatically determined. This constructor
cannot be used for non-bond-based topologies, e.g., improper
dihedrals.
Args:
molecule (Molecule): Input molecule.
bond (bool): Whether to find bonds. If set to False, angle and
dihedral searching will be skipped. Default to True.
angle (bool): Whether to find angles. Default to True.
dihedral (bool): Whether to find dihedrals. Default to True.
ff_label (str): Site property key for labeling atoms of
different types. Default to None, i.e., use
site.species_string.
charges ([q, ...]): Charge of each site in a (n,)
array/list, where n is the No. of sites. Default to
None, i.e., search site property for charges.
velocities ([[vx, vy, vz], ...]): Velocity of each site in
a (n, 3) array/list, where n is the No. of sites.
Default to None, i.e., search site property for
velocities.
tol (float): Bond distance tolerance. Default to 0.1.
Not recommended to alter.
"""
real_bonds = molecule.get_covalent_bonds(tol=tol)
bond_list = [list(map(molecule.index, [b.site1, b.site2]))
for b in real_bonds]
if not all((bond, bond_list)):
# do not search for others if not searching for bonds or no bonds
return cls(sites=molecule, ff_label=ff_label, charges=charges,
velocities=velocities)
else:
angle_list, dihedral_list = [], []
dests, freq = np.unique(bond_list, return_counts=True)
hubs = dests[np.where(freq > 1)]
bond_arr = np.array(bond_list)
if len(hubs) > 0:
hub_spokes = {}
for hub in hubs:
ix = np.any(np.isin(bond_arr, hub), axis=1)
bonds = list(np.unique(bond_arr[ix]))
bonds.remove(hub)
hub_spokes[hub] = bonds
# skip angle or dihedral searching if too few bonds or hubs
dihedral = False if len(bond_list) < 3 or len(hubs) < 2 \
else dihedral
angle = False if len(bond_list) < 2 or len(hubs) < 1 else angle
if angle:
for k, v in hub_spokes.items():
angle_list.extend([[i, k, j] for i, j in
itertools.combinations(v, 2)])
if dihedral:
hub_cons = bond_arr[np.all(np.isin(bond_arr, hubs), axis=1)]
for i, j in hub_cons:
ks = [k for k in hub_spokes[i] if k != j]
ls = [l for l in hub_spokes[j] if l != i]
dihedral_list.extend([[k, i, j, l] for k, l in
itertools.product(ks, ls)
if k != l])
topologies = {k: v for k, v
in zip(SECTION_KEYWORDS["topology"][:3],
[bond_list, angle_list, dihedral_list])
if len(v) > 0}
topologies = None if len(topologies) == 0 else topologies
return cls(sites=molecule, ff_label=ff_label, charges=charges,
velocities=velocities, topologies=topologies)
class ForceField(MSONable):
"""
Class carrying most data in Masses and force field sections.
Attributes:
masses (pandas.DataFrame): DataFrame for Masses section.
force_field (dict): Force field section keywords (keys) and
data (values) as DataFrames.
maps (dict): Dict for labeling atoms and topologies.
"""
_is_valid = lambda self, df: not pd.isnull(df).values.any()
def __init__(self, mass_info, nonbond_coeffs=None, topo_coeffs=None):
"""
Args:
mass_info (list): List of atomic mass info. Elements,
strings (symbols) and floats are all acceptable for the
values, with the first two converted to the atomic mass
of an element. It is recommended to use
OrderedDict.items() to prevent key duplications.
[("C", 12.01), ("H", Element("H")), ("O", "O"), ...]
nonbond_coeffs ([coeffs]): List of Pair or PairIJ
coefficients, of which the sequence must be sorted
according to the species in mass_info. Whether they are
Pair or PairIJ coefficients is determined by the length
of the list. Optional with default to None.
topo_coeffs (dict): Dict with force field coefficients for
molecular topologies. Optional with default
to None. All four valid keys listed below are optional.
Each value is a list of dicts with non optional keys
"coeffs" and "types", and related class2 force field
keywords as optional keys.
{
"Bond Coeffs":
[{"coeffs": [coeff],
"types": [("C", "C"), ...]}, ...],
"Angle Coeffs":
[{"coeffs": [coeff],
"BondBond Coeffs": [coeff],
"types": [("H", "C", "H"), ...]}, ...],
"Dihedral Coeffs":
[{"coeffs": [coeff],
"BondBond13 Coeffs": [coeff],
"types": [("H", "C", "C", "H"), ...]}, ...],
"Improper Coeffs":
[{"coeffs": [coeff],
"AngleAngle Coeffs": [coeff],
"types": [("H", "C", "C", "H"), ...]}, ...],
}
Topologies of the same type or equivalent types (e.g.,
("C", "H") and ("H", "C") bonds) are NOT ALLOWED to
be defined MORE THAN ONCE with DIFFERENT coefficients.
"""
map_mass = lambda v: v.atomic_mass.real if isinstance(v, Element) \
else Element(v).atomic_mass.real if isinstance(v, string_types) \
else v
index, masses, self.mass_info, atoms_map = [], [], [], {}
for i, m in enumerate(mass_info):
index.append(i + 1)
mass = map_mass(m[1])
masses.append(mass)
self.mass_info.append((m[0], mass))
atoms_map[m[0]] = i + 1
self.masses = pd.DataFrame({"mass": masses}, index=index)
self.maps = {"Atoms": atoms_map}
ff_dfs = {}
self.nonbond_coeffs = nonbond_coeffs
if self.nonbond_coeffs:
ff_dfs.update(self._process_nonbond())
self.topo_coeffs = topo_coeffs
if self.topo_coeffs:
self.topo_coeffs = {k: v for k, v in self.topo_coeffs.items()
if k in SECTION_KEYWORDS["ff"][2:]}
for k in self.topo_coeffs.keys():
coeffs, mapper = self._process_topo(k)
ff_dfs.update(coeffs)
self.maps.update(mapper)
self.force_field = None if len(ff_dfs) == 0 else ff_dfs
def _process_nonbond(self):
pair_df = pd.DataFrame(self.nonbond_coeffs)
assert self._is_valid(pair_df), \
"Invalid nonbond coefficients with rows varying in length"
npair, ncoeff = pair_df.shape
pair_df.columns = ["coeff%d" % i for i in range(1, ncoeff + 1)]
nm = len(self.mass_info)
ncomb = int(nm * (nm + 1) / 2)
if npair == nm:
kw = "Pair Coeffs"
pair_df.index = range(1, nm + 1)
elif npair == ncomb:
kw = "PairIJ Coeffs"
ids = list(itertools.
combinations_with_replacement(range(1, nm + 1), 2))
id_df = pd.DataFrame(ids, columns=["id1", "id2"])
pair_df = pd.concat([id_df, pair_df], axis=1)
else:
raise ValueError("Expecting {} Pair Coeffs or "
"{} PairIJ Coeffs for {} atom types,"
" got {}".format(nm, ncomb, nm, npair))
return {kw: pair_df}
def _process_topo(self, kw):
def find_eq_types(label, section):
if section.startswith("Improper"):
label_arr = np.array(label)
seqs = [[0, 1, 2, 3], [0, 2, 1, 3],
[3, 1, 2, 0], [3, 2, 1, 0]]
return [tuple(label_arr[s]) for s in seqs]
else:
return [label] + [label[::-1]]
main_data, distinct_types = [], []
class2_data = {k: [] for k in self.topo_coeffs[kw][0].keys()
if k in CLASS2_KEYWORDS.get(kw, [])}
for i, d in enumerate(self.topo_coeffs[kw]):
main_data.append(d["coeffs"])
distinct_types.append(d["types"])
for k in class2_data.keys():
class2_data[k].append(d[k])
distinct_types = [set(itertools.
chain(*[find_eq_types(t, kw)
for t in dt])) for dt in distinct_types]
type_counts = sum([len(dt) for dt in distinct_types])
type_union = set.union(*distinct_types)
assert len(type_union) == type_counts, "Duplicated items found " \
"under different coefficients in %s" % kw
atoms = set(np.ravel(list(itertools.chain(*distinct_types))))
assert atoms.issubset(self.maps["Atoms"].keys()), \
"Undefined atom type found in %s" % kw
mapper = {}
for i, dt in enumerate(distinct_types):
for t in dt:
mapper[t] = i + 1
def process_data(data):
df = pd.DataFrame(data)
assert self._is_valid(df),\
"Invalid coefficients with rows varying in length"
n, c = df.shape
df.columns = ["coeff%d" % i for i in range(1, c + 1)]
df.index = range(1, n + 1)
return df
all_data = {kw: process_data(main_data)}
if class2_data:
all_data.update({k: process_data(v) for k, v
in class2_data.items()})
return all_data, {kw[:-7] + "s": mapper}
def to_file(self, filename):
"""
Saves object to a file in YAML format.
Args:
filename (str): Filename.
"""
d = {"mass_info": self.mass_info,
"nonbond_coeffs": self.nonbond_coeffs,
"topo_coeffs": self.topo_coeffs}
yaml = YAML(typ="safe")
with open(filename, "w") as f:
yaml.dump(d, f)
@classmethod
def from_file(cls, filename):
"""
Constructor that reads in a file in YAML format.
Args:
filename (str): Filename.
"""
yaml = YAML(typ="safe")
with open(filename, "r") as f:
d = yaml.load(f)
return cls.from_dict(d)
@classmethod
def from_dict(cls, d):
d["mass_info"] = [tuple(m) for m in d["mass_info"]]
if d.get("topo_coeffs"):
for v in d["topo_coeffs"].values():
for c in v:
c["types"] = [tuple(t) for t in c["types"]]
return cls(d["mass_info"], d["nonbond_coeffs"], d["topo_coeffs"])
def structure_2_lmpdata(structure, ff_elements=None, atom_style="charge"):
"""
Converts a structure to a LammpsData object with no force field
parameters and topologies.
Args:
structure (Structure): Input structure.
ff_elements ([str]): List of strings of elements that must be
present due to force field settings but not necessarily in
the structure. Default to None.
atom_style (str): Choose between "atomic" (neutral) and
"charge" (charged). Default to "charge".
Returns:
LammpsData
"""
s = structure.get_sorted_structure()
a, b, c = s.lattice.abc
m = s.lattice.matrix
xhi = a
xy = np.dot(m[1], m[0] / xhi)
yhi = np.sqrt(b ** 2 - xy ** 2)
xz = np.dot(m[2], m[0] / xhi)
yz = (np.dot(m[1], m[2]) - xy * xz) / yhi
zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2)
box_bounds = [[0.0, xhi], [0.0, yhi], [0.0, zhi]]
box_tilt = [xy, xz, yz]
box_tilt = None if not any(box_tilt) else box_tilt
new_latt = Lattice([[xhi, 0, 0], [xy, yhi, 0], [xz, yz, zhi]])
s.modify_lattice(new_latt)
symbols = list(s.symbol_set)
if ff_elements:
symbols.extend(ff_elements)
elements = sorted(Element(el) for el in set(symbols))
mass_info = [tuple([i.symbol] * 2) for i in elements]
ff = ForceField(mass_info)
topo = Topology(s)
return LammpsData.from_ff_and_topologies(ff=ff, topologies=[topo],
box_bounds=box_bounds,
box_tilt=box_tilt,
atom_style=atom_style)
| mit |
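A minimal usage sketch of the LammpsData API defined above (the data file names are hypothetical placeholders; only from_file, disassemble, from_ff_and_topologies and write_file from this module are assumed):

from pymatgen.io.lammps.data import LammpsData

# Parse an existing LAMMPS data file, split it into a ForceField plus one
# Topology per molecule-ID, rebuild the LammpsData and write it back out.
lmp = LammpsData.from_file("polymer.data", atom_style="full")
ff, topologies = lmp.disassemble()
rebuilt = LammpsData.from_ff_and_topologies(ff=ff, topologies=topologies,
                                            box_bounds=lmp.box_bounds,
                                            box_tilt=lmp.box_tilt,
                                            atom_style="full")
rebuilt.write_file("polymer_roundtrip.data")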
kernc/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
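A hedged compatibility note on the example above: recent scikit-learn releases no longer accept penalty='l1' with the default solver, so the L1 fits would need an explicit solver such as 'liblinear' or 'saga'. A minimal sketch under that assumption:

from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler

X, y = datasets.load_digits(return_X_y=True)
X = StandardScaler().fit_transform(X)
# Same binary target as above; the solver is named explicitly for L1 support.
clf_l1 = LogisticRegression(C=1.0, penalty='l1', tol=0.01, solver='liblinear')
clf_l1.fit(X, (y > 4).astype(int))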
djgagne/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 176 | 2169 | from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
# The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
# The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
| bsd-3-clause |
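A short, self-contained usage sketch complementing the tests above (only public scikit-learn API is assumed): FunctionTransformer applies the wrapped callable to X unchanged, which is exactly what test_np_log verifies.

import numpy as np
from sklearn.preprocessing import FunctionTransformer

X = np.arange(10).reshape((5, 2))
log_transformer = FunctionTransformer(np.log1p)
# transform() simply delegates to the wrapped callable.
np.testing.assert_array_equal(log_transformer.transform(X), np.log1p(X))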
SanPen/GridCal | setup.py | 1 | 9976 | """
A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
https://github.com/pypa/sampleproject
"""
"""
THIS FILE IS USED IN THE DOCS BUILDING
DO NOT DELETE!
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import os
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import
from io import open
from src.GridCal.__version__ import __GridCal_VERSION__
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
with open(os.path.join(here, 'doc', 'about.rst'), encoding='utf-8') as f:
description = f.read()
# create the file MANIFEST.in
# f = open("MANIFEST.in", "w")
# for root, dirs, files in os.walk(os.path.join('src', 'GridCal')):
# # path = root.split(os.sep)
# for file in files:
# print(len(root) * '---', file)
# if file.endswith('.bim') or 'docs_build' in root:
# line = 'include ' + os.path.join(root, file)
# f.write(line + '\n')
# f.close()
base_path = os.path.join('src', 'GridCal')
packages = find_packages(where=base_path, exclude=['docs', 'test'])
packages = [os.path.join(base_path, p) for p in packages]
dependencies = ["PySide2>=5.15", # for now, 5.14 breaks the UI generation for development
"numpy>=1.14.0",
"scipy>=1.0.0",
"networkx>=2.1",
"pandas>=0.22",
"xlwt>=1.3.0",
"xlrd>=1.1.0",
"matplotlib>=2.1.1",
"qtconsole>=4.5.4",
"pyDOE>=0.3.8",
"pySOT>=0.2.1",
"openpyxl>=2.4.9",
"smopy>=0.0.6",
"chardet>=3.0.4",
"scikit-learn>=0.18",
"geopy>=1.16",
"pytest>=3.8",
"h5py>=2.9.0",
"numba>=0.46",
"folium",
"pytest>=3.8"]
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='GridCal', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=__GridCal_VERSION__, # Required
# This is a one-line description or tag-line of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description=description, # Optional
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# Denotes that our long_description is in Markdown; valid values are
# text/plain, text/x-rst, and text/markdown
#
# Optional if long_description is written in reStructuredText (rst) but
# required for plain-text or Markdown; if unspecified, "applications should
# attempt to render [the long_description] as text/x-rst; charset=UTF-8 and
# fall back to text/plain if it is not valid rst" (see link below)
#
# This field corresponds to the "Description-Content-Type" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
# long_description_content_type='text/markdown', # Optional (see note above)
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/SanPen/GridCal', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='Santiago Peñate Vera and Michel Lavoie', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='[email protected]', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers and electrical engineers',
'Topic :: Software Development :: Power Systems',
# Pick your license as you wish
'License :: OSI Approved :: GPLv3',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# These classifiers are *not* checked by 'pip install'. See instead
# 'python_requires' below.
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='power systems planning', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=packages, # Required
include_package_data=True,
# Specify which Python versions you support. In contrast to the
# 'Programming Language' classifiers above, 'pip install' will check this
# and refuse to install the project if the version does not match. If you
# do not support Python 2, you can simplify this to '>=3.5' or similar, see
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
python_requires='>=3.7',
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=dependencies,
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
# extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
# package_data=package_data,
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])], # Optional
# data_files=package_data,
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
# entry_points={ # Optional
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
# project_urls='', # optional
)
| gpl-3.0 |
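A hedged, standalone sketch (a hypothetical helper script, not part of GridCal) that reproduces the two inputs the setup() call above derives from the repository layout, so they can be inspected before packaging:

import os
from setuptools import find_packages

here = os.path.abspath(os.path.dirname(__file__))
base_path = os.path.join('src', 'GridCal')
# Mirror the package discovery in setup.py: find sub-packages under
# src/GridCal and prefix them with that base path.
packages = [os.path.join(base_path, p)
            for p in find_packages(where=base_path, exclude=['docs', 'test'])]
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
print(len(packages), "sub-packages;", len(long_description), "characters of README")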
gfyoung/pandas | pandas/tests/io/pytables/test_errors.py | 1 | 7763 | import datetime
from io import BytesIO
import re
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
HDFStore,
MultiIndex,
_testing as tm,
date_range,
read_hdf,
)
from pandas.tests.io.pytables.common import ensure_clean_path, ensure_clean_store
from pandas.io.pytables import Term, _maybe_adjust_name
pytestmark = pytest.mark.single
def test_pass_spec_to_storer(setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
msg = (
"cannot pass a column specification when reading a Fixed format "
"store. this store must be selected in its entirety"
)
with pytest.raises(TypeError, match=msg):
store.select("df", columns=["A"])
msg = (
"cannot pass a where specification when reading from a Fixed "
"format store. this store must be selected in its entirety"
)
with pytest.raises(TypeError, match=msg):
store.select("df", where=[("columns=A")])
def test_table_index_incompatible_dtypes(setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
msg = re.escape("incompatible kind in col [integer - datetime64]")
with pytest.raises(TypeError, match=msg):
store.put("frame", df2, format="table", append=True)
def test_unimplemented_dtypes_table_columns(setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
msg = re.escape(f"[{n}] is not implemented as a table column")
with pytest.raises(TypeError, match=msg):
store.append(f"df1_{n}", df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
msg = re.escape(
"""Cannot serialize the column [datetime1]
because its data contents are not [string] but [date] object dtype"""
)
with pytest.raises(TypeError, match=msg):
store.append("df_unimplemented", df)
def test_invalid_terms(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
msg = re.escape(
"__init__() missing 1 required positional argument: 'where'"
)
with pytest.raises(TypeError, match=msg):
Term()
# more invalid
msg = re.escape(
"cannot process expression [df.index[3]], "
"[2000-01-06 00:00:00] is not a valid condition"
)
with pytest.raises(ValueError, match=msg):
store.select("df", "df.index[3]")
msg = "invalid syntax"
with pytest.raises(SyntaxError, match=msg):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']")
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
msg = (
r"The passed where expression: A>0 or C>0\n\s*"
r"contains an invalid variable reference\n\s*"
r"all of the variable references must be a reference to\n\s*"
r"an axis \(e.g. 'index' or 'columns'\), or a data_column\n\s*"
r"The currently defined references are: index,columns\n"
)
with pytest.raises(ValueError, match=msg):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_append_with_diff_col_name_types_raises_value_error(setup_path):
df = DataFrame(np.random.randn(10, 1))
df2 = DataFrame({"a": np.random.randn(10)})
df3 = DataFrame({(1, 2): np.random.randn(10)})
df4 = DataFrame({("1", 2): np.random.randn(10)})
df5 = DataFrame({("1", 2, object): np.random.randn(10)})
with ensure_clean_store(setup_path) as store:
name = f"df_{tm.rands(10)}"
store.append(name, df)
for d in (df2, df3, df4, df5):
msg = re.escape(
"cannot match existing table structure for [0] on appending data"
)
with pytest.raises(ValueError, match=msg):
store.append(name, d)
def test_invalid_complib(setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
with tm.ensure_clean(setup_path) as path:
msg = r"complib only supports \[.*\] compression."
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, "df", complib="foolib")
@pytest.mark.parametrize(
"idx",
[
date_range("2019", freq="D", periods=3, tz="UTC"),
CategoricalIndex(list("abc")),
],
)
def test_to_hdf_multiindex_extension_dtype(idx, setup_path):
# GH 7775
mi = MultiIndex.from_arrays([idx, idx])
df = DataFrame(0, index=mi, columns=["a"])
with ensure_clean_path(setup_path) as path:
with pytest.raises(NotImplementedError, match="Saving a MultiIndex"):
df.to_hdf(path, "df")
def test_unsuppored_hdf_file_error(datapath):
# GH 9539
data_path = datapath("io", "data", "legacy_hdf/incompatible_dataset.h5")
message = (
r"Dataset\(s\) incompatible with Pandas data types, "
"not table, or no datasets found in HDF5 file."
)
with pytest.raises(ValueError, match=message):
pd.read_hdf(data_path)
def test_read_hdf_errors(setup_path):
df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
with ensure_clean_path(setup_path) as path:
msg = r"File [\S]* does not exist"
with pytest.raises(IOError, match=msg):
read_hdf(path, "key")
df.to_hdf(path, "df")
store = HDFStore(path, mode="r")
store.close()
msg = "The HDFStore must be open for reading."
with pytest.raises(IOError, match=msg):
read_hdf(store, "df")
def test_read_hdf_generic_buffer_errors():
msg = "Support for generic buffers has not been implemented."
with pytest.raises(NotImplementedError, match=msg):
read_hdf(BytesIO(b""), "df")
@pytest.mark.parametrize("bad_version", [(1, 2), (1,), [], "12", "123"])
def test_maybe_adjust_name_bad_version_raises(bad_version):
msg = "Version is incorrect, expected sequence of 3 integers"
with pytest.raises(ValueError, match=msg):
_maybe_adjust_name("values_block_0", version=bad_version)
| bsd-3-clause |
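A self-contained sketch of the assertion pattern these tests rely on (generic pytest usage, nothing pandas-specific): pytest.raises treats match as a regular expression, which is why literal messages above are wrapped in re.escape.

import re
import pytest

def divide(a, b):
    # Toy function used only to demonstrate the pattern.
    if b == 0:
        raise ValueError("cannot divide [a] by zero")
    return a / b

def test_divide_error_message():
    # Square brackets are regex metacharacters, so escape the literal message.
    msg = re.escape("cannot divide [a] by zero")
    with pytest.raises(ValueError, match=msg):
        divide(1, 0)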
PyQuake/earthquakemodels | code/cocobbob/coco/code-postprocessing/cocopp/rungeneric.py | 1 | 10564 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Process data to be included in a latex template. Called via
python -m cocopp [OPTIONS] DATAFOLDER1 DATAFOLDER2 ...
For a detailed help, simply type
python -m cocopp
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import getopt
import warnings
import matplotlib
matplotlib.use('Agg') # To avoid window popup and use without X forwarding
# numpy.seterr(all='raise')
if __name__ == "__main__":
if 11 < 3:
print(matplotlib.rcsetup.all_backends)
# [u'GTK', u'GTKAgg', u'GTKCairo', u'MacOSX', u'Qt4Agg', u'Qt5Agg',
# u'TkAgg', u'WX', u'WXAgg', u'CocoaAgg', u'GTK3Cairo', u'GTK3Agg',
# u'WebAgg', u'nbAgg', u'agg', u'cairo', u'emf', u'gdk', u'pdf',
# u'pgf', u'ps', u'svg', u'template']
matplotlib.use('Agg') # To avoid window popup and use without X forwarding
matplotlib.rc('pdf', fonttype = 42)
# add ".." to the Python search path, import the module to which
# this script belongs to and call the main of this script from imported
# module. Like this all relative imports will work smoothly.
(filepath, filename) = os.path.split(sys.argv[0])
sys.path.append(os.path.join(filepath, os.path.pardir))
import cocopp
res = cocopp.rungeneric.main(sys.argv[1:])
sys.exit(res)
from . import genericsettings, rungeneric1, rungeneric2, rungenericmany, ppfig, toolsdivers
from .toolsdivers import truncate_latex_command_file, print_done
from .ppfig import Usage
from .compall import ppfigs
__all__ = ['main']
def _splitshortoptlist(shortoptlist):
"""Split short options list used by getopt.
Returns a set of the options.
"""
res = set()
tmp = shortoptlist[:]
# split into logical elements: one-letter that could be followed by colon
while tmp:
if len(tmp) > 1 and tmp[1] == ':':
res.add(tmp[0:2])
tmp = tmp[2:]
else:
res.add(tmp[0])
tmp = tmp[1:]
return res
def usage():
print(main.__doc__)
def main(argv=None):
r"""Main routine for post-processing data from COCO.
Synopsis::
python -m cocopp [data_folder [more_data_folders]]
For this call to work, the path to this package must be in python
search path, that is,
* it can be in the current working directory, or
* the path to the package was appended to the Python path, or
* the package was installed (which essentially copies the package
to a location which is in the path)
This routine will:
* call sub-routine :py:func:`cocopp.rungeneric1.main` for each
input argument; each input argument will be used as output
sub-folder relative to the main output folder,
* call either sub-routines :py:func:`cocopp.rungeneric2.main`
(2 input arguments) or :py:func:`cocopp.rungenericmany.main`
(more than 2) for the input arguments altogether.
The output figures and tables written by default to the output folder
:file:`ppdata` are used in the provided LaTeX templates:
* :file:`*article.tex` and :file:`*1*.tex`
for results with a **single** algorithm
* :file:`*cmp.tex` and :file:`*2*.tex`
for showing the comparison of **2** algorithms
* :file:`*many.tex` and :file:`*3*.tex`
for showing the comparison of **more than 2** algorithms.
The templates with `noisy` mentioned in the filename have to be used
for the noisy testbed, the others for the noise-less one.
These latex templates need to be copied in the current working directory
and possibly edited so that the LaTeX commands ``\bbobdatapath`` and
``\algfolder`` point to the correct output folders of the post-processing.
Compiling the template file with LaTeX should then produce a document.
Keyword arguments:
*argv* -- list of strings containing options and arguments. If not
provided, sys.argv is accessed.
*argv* must list folders containing COCO data files. Each of these
folders should correspond to the data of ONE algorithm.
Furthermore, argv can begin with facultative option flags.
-h, --help
displays this message.
-v, --verbose
verbose mode, prints out operations.
-o, --output-dir=OUTPUTDIR
changes the default output directory (:file:`ppdata`) to
:file:`OUTPUTDIR`.
--omit-single
omit calling :py:func:`cocopp.rungeneric1.main`, if
more than one data path argument is provided.
--no-rld-single-fcts
do not generate runlength distribution figures for each
single function.
--input-path=INPUTPATH
all folder/file arguments are prepended with the given value
which must be a valid path.
--in-a-hurry
takes values between 0 (default) and 1000, fast processing that
does not write eps files and uses a small number of bootstrap samples
--no-svg
do not generate the svg figures which are used in html files
Exceptions raised:
*Usage* -- Gives back a usage message.
Examples:
Printing out this help message::
$ python -m cocopp.rungeneric -h
Post-processing two algorithms in verbose mode::
$ python -m cocopp -v AMALGAM BIPOP-CMA-ES
From the python interpreter::
>> import cocopp as pp
>> pp.main('-o outputfolder folder1 folder2')
This will execute the post-processing on the data found in
:file:`folder1` and :file:`folder2`. The ``-o`` option changes the
output folder from the default :file:`ppdata` to
:file:`outputfolder`. The arguments can also be presented as
a list of strings.
"""
if argv is None:
argv = sys.argv[1:]
if not isinstance(argv, list) and str(argv) == argv: # get rid of .split in python shell
argv = argv.split()
try:
try:
opts, args = getopt.getopt(argv, genericsettings.shortoptlist,
genericsettings.longoptlist +
['omit-single', 'in-a-hurry=', 'input-path='])
except getopt.error as msg:
raise Usage(msg)
if not args:
usage()
sys.exit()
inputdir = '.'
#Process options
shortoptlist = list("-" + i.rstrip(":")
for i in _splitshortoptlist(genericsettings.shortoptlist))
shortoptlist.remove("-o")
longoptlist = list("--" + i.rstrip("=") for i in genericsettings.longoptlist)
genopts = []
outputdir = genericsettings.outputdir
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-o", "--output-dir"):
outputdir = a
elif o in ("--in-a-hurry", ):
genericsettings.in_a_hurry = int(a)
if genericsettings.in_a_hurry:
print('in_a_hurry like ', genericsettings.in_a_hurry, ' (should finally be set to zero)')
elif o in ("--input-path", ):
inputdir = a
elif o in ("--no-svg"):
genericsettings.generate_svg_files = False
else:
isAssigned = False
if o in longoptlist or o in shortoptlist:
genopts.append(o)
# Append o and then a separately otherwise the list of
# command line arguments might be incorrect
if a:
genopts.append(a)
isAssigned = True
if o in ("-v", "--verbose"):
genericsettings.verbose = True
isAssigned = True
if o == '--omit-single':
isAssigned = True
if not isAssigned:
assert False, "unhandled option"
if (not genericsettings.verbose):
warnings.filterwarnings('module', '.*', UserWarning, '.*')
#warnings.simplefilter('ignore') # that is bad, but otherwise too many warnings appear
# print("\nPost-processing: will generate output " +
# "data in folder %s" % outputdir)
# print(" this might take several minutes.")
if not os.path.exists(outputdir):
os.makedirs(outputdir)
if genericsettings.verbose:
print('Folder %s was created.' % (outputdir))
latex_commands_filename = os.path.join(outputdir,
'cocopp_commands.tex')
truncate_latex_command_file(latex_commands_filename)
for i in range(len(args)): # prepend common path inputdir to all names
args[i] = os.path.join(inputdir, args[i])
for i, alg in enumerate(args):
# remove '../' from algorithm output folder
if len(args) == 1 or '--omit-single' not in dict(opts):
rungeneric1.main(genopts
+ ["-o", outputdir, alg])
if len(args) == 2:
rungeneric2.main(genopts + ["-o", outputdir] + args)
elif len(args) > 2:
rungenericmany.main(genopts + ["-o", outputdir] + args)
toolsdivers.prepend_to_file(latex_commands_filename,
['\\providecommand{\\cocoversion}{\\hspace{\\textwidth}\\scriptsize\\sffamily{}\\color{Gray}Data produced with COCO %s}' % (toolsdivers.get_version_label(None))]
)
toolsdivers.prepend_to_file(latex_commands_filename,
['\\providecommand{\\bbobecdfcaptionsinglefunctionssingledim}[1]{',
ppfigs.get_ecdfs_single_functions_single_dim_caption(), '}'])
open(os.path.join(outputdir,
'cocopp_commands.tex'), 'a').close()
ppfig.save_index_html_file(os.path.join(outputdir, genericsettings.index_html_file_name))
# ppdata file is now deprecated.
ppfig.save_index_html_file(os.path.join(outputdir, 'ppdata'))
print_done()
#TODO prevent loading the data every time...
except Usage as err:
print(err.msg, file=sys.stderr)
print("For help use -h or --help", file=sys.stderr)
return 2
if __name__ == "__main__":
res = main()
if genericsettings.test:
print(res)
# sys.exit(res)
| bsd-3-clause |
kdebrab/pandas | pandas/tests/groupby/test_transform.py | 1 | 27597 | """ test with the .transform """
import pytest
import numpy as np
import pandas as pd
from pandas.util import testing as tm
from pandas import Series, DataFrame, Timestamp, MultiIndex, concat, date_range
from pandas.core.dtypes.common import (
ensure_platform_int, is_timedelta64_dtype)
from pandas.compat import StringIO
from pandas._libs import groupby
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.groupby.groupby import DataError
from pandas.core.config import option_context
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
def test_transform():
data = Series(np.arange(9) // 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
# GH 8046
# make sure that we preserve the input order
df = DataFrame(
np.arange(6, dtype='int64').reshape(
3, 2), columns=["a", "b"], index=[0, 2, 1])
key = [0, 0, 1]
expected = df.sort_index().groupby(key).transform(
lambda x: x - x.mean()).groupby(key).mean()
result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(
key).mean()
assert_frame_equal(result, expected)
def demean(arr):
return arr - arr.mean()
people = DataFrame(np.random.randn(5, 5),
columns=['a', 'b', 'c', 'd', 'e'],
index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
key = ['one', 'two', 'one', 'two', 'one']
result = people.groupby(key).transform(demean).groupby(key).mean()
expected = people.groupby(key).apply(demean).groupby(key).mean()
assert_frame_equal(result, expected)
# GH 8430
df = tm.makeTimeDataFrame()
g = df.groupby(pd.Grouper(freq='M'))
g.transform(lambda x: x - 1)
# GH 9700
df = DataFrame({'a': range(5, 10), 'b': range(5)})
result = df.groupby('a').transform(max)
expected = DataFrame({'b': range(5)})
tm.assert_frame_equal(result, expected)
def test_transform_fast():
df = DataFrame({'id': np.arange(100000) / 3,
'val': np.random.randn(100000)})
grp = df.groupby('id')['val']
values = np.repeat(grp.mean().values,
ensure_platform_int(grp.count().values))
expected = pd.Series(values, index=df.index, name='val')
result = grp.transform(np.mean)
assert_series_equal(result, expected)
result = grp.transform('mean')
assert_series_equal(result, expected)
# GH 12737
df = pd.DataFrame({'grouping': [0, 1, 1, 3], 'f': [1.1, 2.1, 3.1, 4.5],
'd': pd.date_range('2014-1-1', '2014-1-4'),
'i': [1, 2, 3, 4]},
columns=['grouping', 'f', 'i', 'd'])
result = df.groupby('grouping').transform('first')
dates = [pd.Timestamp('2014-1-1'), pd.Timestamp('2014-1-2'),
pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-4')]
expected = pd.DataFrame({'f': [1.1, 2.1, 2.1, 4.5],
'd': dates,
'i': [1, 2, 2, 4]},
columns=['f', 'i', 'd'])
assert_frame_equal(result, expected)
# selection
result = df.groupby('grouping')[['f', 'i']].transform('first')
expected = expected[['f', 'i']]
assert_frame_equal(result, expected)
# dup columns
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['g', 'a', 'a'])
result = df.groupby('g').transform('first')
expected = df.drop('g', axis=1)
assert_frame_equal(result, expected)
def test_transform_broadcast(tsframe, ts):
grouped = ts.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, ts.index)
for _, gp in grouped:
assert_fp_equal(result.reindex(gp.index), gp.mean())
grouped = tsframe.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, tsframe.index)
for _, gp in grouped:
agged = gp.mean()
res = result.reindex(gp.index)
for col in tsframe:
assert_fp_equal(res[col], agged[col])
# group columns
grouped = tsframe.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
axis=1)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, tsframe.index)
tm.assert_index_equal(result.columns, tsframe.columns)
for _, gp in grouped:
agged = gp.mean(1)
res = result.reindex(columns=gp.columns)
for idx in gp.index:
assert_fp_equal(res.xs(idx), agged[idx])
def test_transform_axis(tsframe):
# make sure that we are setting the axes
# correctly when on axis=0 or 1
# in the presence of a non-monotonic indexer
# GH12713
base = tsframe.iloc[0:5]
r = len(base.index)
c = len(base.columns)
tso = DataFrame(np.random.randn(r, c),
index=base.index,
columns=base.columns,
dtype='float64')
# monotonic
ts = tso
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: x - x.mean())
assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
assert_frame_equal(result, expected)
# non-monotonic
ts = tso.iloc[[1, 0] + list(range(2, len(base)))]
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: x - x.mean())
assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
assert_frame_equal(result, expected)
def test_transform_dtype():
# GH 9807
# Check transform dtype output is preserved
df = DataFrame([[1, 3], [2, 3]])
result = df.groupby(1).transform('mean')
expected = DataFrame([[1.5], [1.5]])
assert_frame_equal(result, expected)
def test_transform_bug():
# GH 5712
# transforming on a datetime column
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
result = df.groupby('A')['B'].transform(
lambda x: x.rank(ascending=False))
expected = Series(np.arange(5, 0, step=-1), name='B')
assert_series_equal(result, expected)
def test_transform_numeric_to_boolean():
# GH 16875
# inconsistency in transforming boolean values
expected = pd.Series([True, True], name='A')
df = pd.DataFrame({'A': [1.1, 2.2], 'B': [1, 2]})
result = df.groupby('B').A.transform(lambda x: True)
assert_series_equal(result, expected)
df = pd.DataFrame({'A': [1, 2], 'B': [1, 2]})
result = df.groupby('B').A.transform(lambda x: True)
assert_series_equal(result, expected)
def test_transform_datetime_to_timedelta():
# GH 15429
# transforming a datetime to timedelta
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
expected = pd.Series([
Timestamp('20130101') - Timestamp('20130101')] * 5, name='A')
# this does date math without changing result type in transform
base_time = df['A'][0]
result = df.groupby('A')['A'].transform(
lambda x: x.max() - x.min() + base_time) - base_time
assert_series_equal(result, expected)
# this does date math and causes the transform to return timedelta
result = df.groupby('A')['A'].transform(lambda x: x.max() - x.min())
assert_series_equal(result, expected)
def test_transform_datetime_to_numeric():
# GH 10972
# convert dt to float
df = DataFrame({
'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
result = df.groupby('a').b.transform(
lambda x: x.dt.dayofweek - x.dt.dayofweek.mean())
expected = Series([-0.5, 0.5], name='b')
assert_series_equal(result, expected)
# convert dt to int
df = DataFrame({
'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
result = df.groupby('a').b.transform(
lambda x: x.dt.dayofweek - x.dt.dayofweek.min())
expected = Series([0, 1], name='b')
assert_series_equal(result, expected)
def test_transform_casting():
# 13046
data = """
idx A ID3 DATETIME
0 B-028 b76cd912ff "2014-10-08 13:43:27"
1 B-054 4a57ed0b02 "2014-10-08 14:26:19"
2 B-076 1a682034f8 "2014-10-08 14:29:01"
3 B-023 b76cd912ff "2014-10-08 18:39:34"
4 B-023 f88g8d7sds "2014-10-08 18:40:18"
5 B-033 b76cd912ff "2014-10-08 18:44:30"
6 B-032 b76cd912ff "2014-10-08 18:46:00"
7 B-037 b76cd912ff "2014-10-08 18:52:15"
8 B-046 db959faf02 "2014-10-08 18:59:59"
9 B-053 b76cd912ff "2014-10-08 19:17:48"
10 B-065 b76cd912ff "2014-10-08 19:21:38"
"""
df = pd.read_csv(StringIO(data), sep=r'\s+',
index_col=[0], parse_dates=['DATETIME'])
result = df.groupby('ID3')['DATETIME'].transform(lambda x: x.diff())
assert is_timedelta64_dtype(result.dtype)
result = df[['ID3', 'DATETIME']].groupby('ID3').transform(
lambda x: x.diff())
assert is_timedelta64_dtype(result.DATETIME.dtype)
def test_transform_multiple(ts):
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
grouped.transform(lambda x: x * 2)
grouped.transform(np.mean)
def test_dispatch_transform(tsframe):
df = tsframe[::5].reindex(tsframe.index)
grouped = df.groupby(lambda x: x.month)
filled = grouped.fillna(method='pad')
fillit = lambda x: x.fillna(method='pad')
expected = df.groupby(lambda x: x.month).transform(fillit)
assert_frame_equal(filled, expected)
def test_transform_select_columns(df):
f = lambda x: x.mean()
result = df.groupby('A')['C', 'D'].transform(f)
selection = df[['C', 'D']]
expected = selection.groupby(df['A']).transform(f)
assert_frame_equal(result, expected)
def test_transform_exclude_nuisance(df):
# this also tests orderings in transform between
# series/frame to make sure it's consistent
expected = {}
grouped = df.groupby('A')
expected['C'] = grouped['C'].transform(np.mean)
expected['D'] = grouped['D'].transform(np.mean)
expected = DataFrame(expected)
result = df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
def test_transform_function_aliases(df):
result = df.groupby('A').transform('mean')
expected = df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
result = df.groupby('A')['C'].transform('mean')
expected = df.groupby('A')['C'].transform(np.mean)
assert_series_equal(result, expected)
def test_series_fast_transform_date():
# GH 13191
df = pd.DataFrame({'grouping': [np.nan, 1, 1, 3],
'd': pd.date_range('2014-1-1', '2014-1-4')})
result = df.groupby('grouping')['d'].transform('first')
dates = [pd.NaT, pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-2'),
pd.Timestamp('2014-1-4')]
expected = pd.Series(dates, name='d')
assert_series_equal(result, expected)
def test_transform_length():
# GH 9697
df = pd.DataFrame({'col1': [1, 1, 2, 2], 'col2': [1, 2, 3, np.nan]})
expected = pd.Series([3.0] * 4)
def nsum(x):
return np.nansum(x)
results = [df.groupby('col1').transform(sum)['col2'],
df.groupby('col1')['col2'].transform(sum),
df.groupby('col1').transform(nsum)['col2'],
df.groupby('col1')['col2'].transform(nsum)]
for result in results:
assert_series_equal(result, expected, check_names=False)
def test_transform_coercion():
# 14457
# when we are transforming be sure to not coerce
# via assignment
df = pd.DataFrame(dict(A=['a', 'a'], B=[0, 1]))
g = df.groupby('A')
expected = g.transform(np.mean)
result = g.transform(lambda x: np.mean(x))
assert_frame_equal(result, expected)
def test_groupby_transform_with_int():
# GH 3740, make sure that we might upcast on item-by-item transform
# floats
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=Series(1, dtype='float64'),
C=Series(
[1, 2, 3, 1, 2, 3], dtype='float64'), D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
expected = DataFrame(dict(B=np.nan, C=Series(
[-1, 0, 1, -1, 0, 1], dtype='float64')))
assert_frame_equal(result, expected)
# int case
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1,
C=[1, 2, 3, 1, 2, 3], D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
expected = DataFrame(dict(B=np.nan, C=[-1, 0, 1, -1, 0, 1]))
assert_frame_equal(result, expected)
# int that needs float conversion
s = Series([2, 3, 4, 10, 5, -1])
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1, C=s, D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
s1 = s.iloc[0:3]
s1 = (s1 - s1.mean()) / s1.std()
s2 = s.iloc[3:6]
s2 = (s2 - s2.mean()) / s2.std()
expected = DataFrame(dict(B=np.nan, C=concat([s1, s2])))
assert_frame_equal(result, expected)
# int downcasting
result = df.groupby('A').transform(lambda x: x * 2 / 2)
expected = DataFrame(dict(B=1, C=[2, 3, 4, 10, 5, -1]))
assert_frame_equal(result, expected)
def test_groupby_transform_with_nan_group():
# GH 9941
df = pd.DataFrame({'a': range(10),
'b': [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]})
result = df.groupby(df.b)['a'].transform(max)
expected = pd.Series([1., 1., 2., 3., np.nan, 6., 6., 9., 9., 9.],
name='a')
assert_series_equal(result, expected)
def test_transform_mixed_type():
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
group['g'] = group['d'] * 2
return group[:1]
grouped = df.groupby('c')
result = grouped.apply(f)
assert result['d'].dtype == np.float64
# this is by definition a mutating operation!
with option_context('mode.chained_assignment', None):
for key, group in grouped:
res = f(group)
assert_frame_equal(res, result.loc[key])
def test_cython_group_transform_algos():
# GH 4095
dtypes = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint32,
np.uint64, np.float32, np.float64]
ops = [(groupby.group_cumprod_float64, np.cumproduct, [np.float64]),
(groupby.group_cumsum, np.cumsum, dtypes)]
is_datetimelike = False
for pd_op, np_op, dtypes in ops:
for dtype in dtypes:
data = np.array([[1], [2], [3], [4]], dtype=dtype)
ans = np.zeros_like(data)
labels = np.array([0, 0, 0, 0], dtype=np.int64)
pd_op(ans, data, labels, is_datetimelike)
tm.assert_numpy_array_equal(np_op(data), ans[:, 0],
check_dtype=False)
# with nans
labels = np.array([0, 0, 0, 0, 0], dtype=np.int64)
data = np.array([[1], [2], [3], [np.nan], [4]], dtype='float64')
actual = np.zeros_like(data)
actual.fill(np.nan)
groupby.group_cumprod_float64(actual, data, labels, is_datetimelike)
expected = np.array([1, 2, 6, np.nan, 24], dtype='float64')
tm.assert_numpy_array_equal(actual[:, 0], expected)
actual = np.zeros_like(data)
actual.fill(np.nan)
groupby.group_cumsum(actual, data, labels, is_datetimelike)
expected = np.array([1, 3, 6, np.nan, 10], dtype='float64')
tm.assert_numpy_array_equal(actual[:, 0], expected)
# timedelta
is_datetimelike = True
data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None]
actual = np.zeros_like(data, dtype='int64')
groupby.group_cumsum(actual, data.view('int64'), labels,
is_datetimelike)
expected = np.array([np.timedelta64(1, 'ns'), np.timedelta64(
2, 'ns'), np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'),
np.timedelta64(5, 'ns')])
tm.assert_numpy_array_equal(actual[:, 0].view('m8[ns]'), expected)
@pytest.mark.parametrize(
"op, args, targop",
[('cumprod', (), lambda x: x.cumprod()),
('cumsum', (), lambda x: x.cumsum()),
('shift', (-1, ), lambda x: x.shift(-1)),
('shift', (1, ), lambda x: x.shift())])
def test_cython_transform_series(op, args, targop):
# GH 4095
s = Series(np.random.randn(1000))
s_missing = s.copy()
s_missing.iloc[2:10] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
# series
for data in [s, s_missing]:
# print(data.head())
expected = data.groupby(labels).transform(targop)
tm.assert_series_equal(
expected,
data.groupby(labels).transform(op, *args))
tm.assert_series_equal(expected, getattr(
data.groupby(labels), op)(*args))
@pytest.mark.parametrize("op", ['cumprod', 'cumsum'])
@pytest.mark.parametrize("skipna", [False, True])
@pytest.mark.parametrize('input, exp', [
# When everything is NaN
({'key': ['b'] * 10, 'value': np.nan},
pd.Series([np.nan] * 10, name='value')),
# When there is a single NaN
({'key': ['b'] * 10 + ['a'] * 2,
'value': [3] * 3 + [np.nan] + [3] * 8},
{('cumprod', False): [3.0, 9.0, 27.0] + [np.nan] * 7 + [3.0, 9.0],
('cumprod', True): [3.0, 9.0, 27.0, np.nan, 81., 243., 729.,
2187., 6561., 19683., 3.0, 9.0],
('cumsum', False): [3.0, 6.0, 9.0] + [np.nan] * 7 + [3.0, 6.0],
('cumsum', True): [3.0, 6.0, 9.0, np.nan, 12., 15., 18.,
21., 24., 27., 3.0, 6.0]})])
def test_groupby_cum_skipna(op, skipna, input, exp):
df = pd.DataFrame(input)
result = df.groupby('key')['value'].transform(op, skipna=skipna)
if isinstance(exp, dict):
expected = exp[(op, skipna)]
else:
expected = exp
expected = pd.Series(expected, name='value')
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize(
"op, args, targop",
[('cumprod', (), lambda x: x.cumprod()),
('cumsum', (), lambda x: x.cumsum()),
('shift', (-1, ), lambda x: x.shift(-1)),
('shift', (1, ), lambda x: x.shift())])
def test_cython_transform_frame(op, args, targop):
s = Series(np.random.randn(1000))
s_missing = s.copy()
s_missing.iloc[2:10] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
strings = list('qwertyuiopasdfghjklz')
strings_missing = strings[:]
strings_missing[5] = np.nan
df = DataFrame({'float': s,
'float_missing': s_missing,
'int': [1, 1, 1, 1, 2] * 200,
'datetime': pd.date_range('1990-1-1', periods=1000),
'timedelta': pd.timedelta_range(1, freq='s',
periods=1000),
'string': strings * 50,
'string_missing': strings_missing * 50},
columns=['float', 'float_missing', 'int', 'datetime',
'timedelta', 'string', 'string_missing'])
df['cat'] = df['string'].astype('category')
df2 = df.copy()
df2.index = pd.MultiIndex.from_product([range(100), range(10)])
# DataFrame - Single and MultiIndex,
# group by values, index level, columns
for df in [df, df2]:
for gb_target in [dict(by=labels), dict(level=0), dict(by='string')
]: # dict(by='string_missing')]:
# dict(by=['int','string'])]:
gb = df.groupby(**gb_target)
# whitelisted methods set the selection before applying
            # a bit of a hack to make sure the cythonized shift
# is equivalent to pre 0.17.1 behavior
if op == 'shift':
gb._set_group_selection()
if op != 'shift' and 'int' not in gb_target:
# numeric apply fastpath promotes dtype so have
# to apply separately and concat
i = gb[['int']].apply(targop)
f = gb[['float', 'float_missing']].apply(targop)
expected = pd.concat([f, i], axis=1)
else:
expected = gb.apply(targop)
expected = expected.sort_index(axis=1)
tm.assert_frame_equal(expected,
gb.transform(op, *args).sort_index(
axis=1))
tm.assert_frame_equal(
expected,
getattr(gb, op)(*args).sort_index(axis=1))
# individual columns
for c in df:
if c not in ['float', 'int', 'float_missing'
] and op != 'shift':
pytest.raises(DataError, gb[c].transform, op)
pytest.raises(DataError, getattr(gb[c], op))
else:
expected = gb[c].apply(targop)
expected.name = c
tm.assert_series_equal(expected,
gb[c].transform(op, *args))
tm.assert_series_equal(expected,
getattr(gb[c], op)(*args))
def test_transform_with_non_scalar_group():
# GH 10165
cols = pd.MultiIndex.from_tuples([
('syn', 'A'), ('mis', 'A'), ('non', 'A'),
('syn', 'C'), ('mis', 'C'), ('non', 'C'),
('syn', 'T'), ('mis', 'T'), ('non', 'T'),
('syn', 'G'), ('mis', 'G'), ('non', 'G')])
df = pd.DataFrame(np.random.randint(1, 10, (4, 12)),
columns=cols,
index=['A', 'C', 'G', 'T'])
tm.assert_raises_regex(ValueError, 'transform must return '
'a scalar value for each '
'group.*',
df.groupby(axis=1, level=1).transform,
lambda z: z.div(z.sum(axis=1), axis=0))
@pytest.mark.parametrize('cols,exp,comp_func', [
('a', pd.Series([1, 1, 1], name='a'), tm.assert_series_equal),
(['a', 'c'], pd.DataFrame({'a': [1, 1, 1], 'c': [1, 1, 1]}),
tm.assert_frame_equal)
])
@pytest.mark.parametrize('agg_func', [
'count', 'rank', 'size'])
def test_transform_numeric_ret(cols, exp, comp_func, agg_func):
if agg_func == 'size' and isinstance(cols, list):
pytest.xfail("'size' transformation not supported with "
"NDFrameGroupy")
# GH 19200
df = pd.DataFrame(
{'a': pd.date_range('2018-01-01', periods=3),
'b': range(3),
'c': range(7, 10)})
result = df.groupby('b')[cols].transform(agg_func)
if agg_func == 'rank':
exp = exp.astype('float')
comp_func(result, exp)
@pytest.mark.parametrize("mix_groupings", [True, False])
@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize("val1,val2", [
('foo', 'bar'), (1, 2), (1., 2.)])
@pytest.mark.parametrize("fill_method,limit,exp_vals", [
("ffill", None,
[np.nan, np.nan, 'val1', 'val1', 'val1', 'val2', 'val2', 'val2']),
("ffill", 1,
[np.nan, np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan]),
("bfill", None,
['val1', 'val1', 'val1', 'val2', 'val2', 'val2', np.nan, np.nan]),
("bfill", 1,
[np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan, np.nan])
])
def test_group_fill_methods(mix_groupings, as_series, val1, val2,
fill_method, limit, exp_vals):
vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan]
_exp_vals = list(exp_vals)
# Overwrite placeholder values
for index, exp_val in enumerate(_exp_vals):
if exp_val == 'val1':
_exp_vals[index] = val1
elif exp_val == 'val2':
_exp_vals[index] = val2
# Need to modify values and expectations depending on the
# Series / DataFrame that we ultimately want to generate
if mix_groupings: # ['a', 'b', 'a, 'b', ...]
keys = ['a', 'b'] * len(vals)
def interweave(list_obj):
temp = list()
for x in list_obj:
temp.extend([x, x])
return temp
_exp_vals = interweave(_exp_vals)
vals = interweave(vals)
else: # ['a', 'a', 'a', ... 'b', 'b', 'b']
keys = ['a'] * len(vals) + ['b'] * len(vals)
_exp_vals = _exp_vals * 2
vals = vals * 2
df = DataFrame({'key': keys, 'val': vals})
if as_series:
result = getattr(
df.groupby('key')['val'], fill_method)(limit=limit)
exp = Series(_exp_vals, name='val')
assert_series_equal(result, exp)
else:
result = getattr(df.groupby('key'), fill_method)(limit=limit)
exp = DataFrame({'key': keys, 'val': _exp_vals})
assert_frame_equal(result, exp)
@pytest.mark.parametrize("fill_method", ['ffill', 'bfill'])
def test_pad_stable_sorting(fill_method):
# GH 21207
x = [0] * 20
y = [np.nan] * 10 + [1] * 10
if fill_method == 'bfill':
y = y[::-1]
df = pd.DataFrame({'x': x, 'y': y})
expected = df.copy()
result = getattr(df.groupby('x'), fill_method)()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("test_series", [True, False])
@pytest.mark.parametrize("periods,fill_method,limit", [
(1, 'ffill', None), (1, 'ffill', 1),
(1, 'bfill', None), (1, 'bfill', 1),
(-1, 'ffill', None), (-1, 'ffill', 1),
(-1, 'bfill', None), (-1, 'bfill', 1)])
def test_pct_change(test_series, periods, fill_method, limit):
vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
exp_vals = Series(vals).pct_change(periods=periods,
fill_method=fill_method,
limit=limit).tolist()
df = DataFrame({'key': ['a'] * len(vals) + ['b'] * len(vals),
'vals': vals * 2})
grp = df.groupby('key')
def get_result(grp_obj):
return grp_obj.pct_change(periods=periods,
fill_method=fill_method,
limit=limit)
if test_series:
exp = pd.Series(exp_vals * 2)
exp.name = 'vals'
grp = grp['vals']
result = get_result(grp)
tm.assert_series_equal(result, exp)
else:
exp = DataFrame({'vals': exp_vals * 2})
result = get_result(grp)
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("func", [np.any, np.all])
def test_any_all_np_func(func):
# GH 20653
df = pd.DataFrame([['foo', True],
[np.nan, True],
['foo', True]], columns=['key', 'val'])
exp = pd.Series([True, np.nan, True], name='val')
res = df.groupby('key')['val'].transform(func)
tm.assert_series_equal(res, exp)
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/jupyter_core/tests/dotipython_empty/profile_default/ipython_kernel_config.py | 24 | 15358 | # Configuration file for ipython-kernel.
c = get_config()
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp, ConnectionFileMixin
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# Reraise exceptions encountered loading IPython extensions?
# c.IPKernelApp.reraise_ipython_extension_failures = False
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# The IPython profile to use.
# c.IPKernelApp.profile = 'default'
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = ''
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = ''
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
#
# c.IPKernelApp.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = <class 'IPython.kernel.zmq.ipkernel.IPythonKernel'>
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.IPKernelApp.ip = ''
#------------------------------------------------------------------------------
# IPythonKernel configuration
#------------------------------------------------------------------------------
# IPythonKernel will inherit config from: Kernel
#
# c.IPythonKernel._execute_sleep = 0.0005
# Whether to use appnope for compatibility with OS X App Nap.
#
# Only affects OS X >= 10.9.
# c.IPythonKernel._darwin_app_nap = True
#
# c.IPythonKernel._poll_interval = 0.05
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.separate_out = ''
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.history_length = 10000
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQInteractiveShell.display_page = False
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.ZMQInteractiveShell.logstart = False
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.quiet = False
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
# The part of the banner to be printed before the profile
# c.ZMQInteractiveShell.banner1 = 'Python 3.4.3 |Continuum Analytics, Inc.| (default, Mar 6 2015, 12:07:41) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://binstar.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
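# As an illustrative example (assumed behaviour of the 'smart' setting): with
# autocall = 1 an input line like `len "abc"` is rewritten to `len("abc")`,
# while a bare `len` with no arguments is left untouched; with autocall = 2
# even the bare `len` would be called.
# c.ZMQInteractiveShell.autocall = 1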
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'LightBG'
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.separate_out2 = ''
# The part of the banner to be printed after the profile
# c.ZMQInteractiveShell.banner2 = ''
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.ZMQInteractiveShell.logappend = ''
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'minrk'
# Debug output in the Session
# c.Session.debug = False
# path to file containing execution key.
# c.Session.keyfile = ''
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The UUID identifying this session.
# c.Session.session = ''
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# execution key, for signing messages.
# c.Session.key = b''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
| gpl-3.0 |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/tests/test_msgpack/test_sequnpack.py | 9 | 3317 | #!/usr/bin/env python
# coding: utf-8
import unittest
from pandas import compat
from pandas.msgpack import Unpacker, BufferFull
from pandas.msgpack import OutOfData
class TestPack(unittest.TestCase):
def test_partialdata(self):
unpacker = Unpacker()
unpacker.feed(b'\xa5')
self.assertRaises(StopIteration, next, iter(unpacker))
unpacker.feed(b'h')
self.assertRaises(StopIteration, next, iter(unpacker))
unpacker.feed(b'a')
self.assertRaises(StopIteration, next, iter(unpacker))
unpacker.feed(b'l')
self.assertRaises(StopIteration, next, iter(unpacker))
unpacker.feed(b'l')
self.assertRaises(StopIteration, next, iter(unpacker))
unpacker.feed(b'o')
assert next(iter(unpacker)) == b'hallo'
def test_foobar(self):
unpacker = Unpacker(read_size=3, use_list=1)
unpacker.feed(b'foobar')
assert unpacker.unpack() == ord(b'f')
assert unpacker.unpack() == ord(b'o')
assert unpacker.unpack() == ord(b'o')
assert unpacker.unpack() == ord(b'b')
assert unpacker.unpack() == ord(b'a')
assert unpacker.unpack() == ord(b'r')
self.assertRaises(OutOfData, unpacker.unpack)
unpacker.feed(b'foo')
unpacker.feed(b'bar')
k = 0
for o, e in zip(unpacker, 'foobarbaz'):
assert o == ord(e)
k += 1
assert k == len(b'foobar')
def test_foobar_skip(self):
unpacker = Unpacker(read_size=3, use_list=1)
unpacker.feed(b'foobar')
assert unpacker.unpack() == ord(b'f')
unpacker.skip()
assert unpacker.unpack() == ord(b'o')
unpacker.skip()
assert unpacker.unpack() == ord(b'a')
unpacker.skip()
self.assertRaises(OutOfData, unpacker.unpack)
def test_maxbuffersize(self):
self.assertRaises(ValueError, Unpacker, read_size=5, max_buffer_size=3)
unpacker = Unpacker(read_size=3, max_buffer_size=3, use_list=1)
unpacker.feed(b'fo')
self.assertRaises(BufferFull, unpacker.feed, b'ob')
unpacker.feed(b'o')
assert ord('f') == next(unpacker)
unpacker.feed(b'b')
assert ord('o') == next(unpacker)
assert ord('o') == next(unpacker)
assert ord('b') == next(unpacker)
def test_readbytes(self):
unpacker = Unpacker(read_size=3)
unpacker.feed(b'foobar')
assert unpacker.unpack() == ord(b'f')
assert unpacker.read_bytes(3) == b'oob'
assert unpacker.unpack() == ord(b'a')
assert unpacker.unpack() == ord(b'r')
# Test buffer refill
unpacker = Unpacker(compat.BytesIO(b'foobar'), read_size=3)
assert unpacker.unpack() == ord(b'f')
assert unpacker.read_bytes(3) == b'oob'
assert unpacker.unpack() == ord(b'a')
assert unpacker.unpack() == ord(b'r')
def test_issue124(self):
unpacker = Unpacker()
unpacker.feed(b'\xa1?\xa1!')
assert tuple(unpacker) == (b'?', b'!')
assert tuple(unpacker) == ()
unpacker.feed(b"\xa1?\xa1")
assert tuple(unpacker) == (b'?', )
assert tuple(unpacker) == ()
unpacker.feed(b"!")
assert tuple(unpacker) == (b'!', )
assert tuple(unpacker) == ()
| mit |
kdebrab/pandas | pandas/tests/frame/test_rank.py | 2 | 10691 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas.util.testing as tm
from distutils.version import LooseVersion
from datetime import timedelta, datetime
from numpy import nan
from pandas.util.testing import assert_frame_equal
from pandas.tests.frame.common import TestData
from pandas import Series, DataFrame
class TestRank(TestData):
s = Series([1, 3, 4, 2, nan, 2, 1, 5, nan, 3])
df = DataFrame({'A': s, 'B': s})
results = {
'average': np.array([1.5, 5.5, 7.0, 3.5, nan,
3.5, 1.5, 8.0, nan, 5.5]),
'min': np.array([1, 5, 7, 3, nan, 3, 1, 8, nan, 5]),
'max': np.array([2, 6, 7, 4, nan, 4, 2, 8, nan, 6]),
'first': np.array([1, 5, 7, 3, nan, 4, 2, 8, nan, 6]),
'dense': np.array([1, 3, 4, 2, nan, 2, 1, 5, nan, 3]),
}
@pytest.fixture(params=['average', 'min', 'max', 'first', 'dense'])
def method(self, request):
"""
Fixture for trying all rank methods
"""
return request.param
def test_rank(self):
rankdata = pytest.importorskip('scipy.stats.rankdata')
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
self.frame['C'][::4] = np.nan
self.frame['D'][::5] = np.nan
ranks0 = self.frame.rank()
ranks1 = self.frame.rank(1)
mask = np.isnan(self.frame.values)
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp0[mask] = np.nan
exp1 = np.apply_along_axis(rankdata, 1, fvals)
exp1[mask] = np.nan
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# integers
df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))
result = df.rank()
exp = df.astype(float).rank()
tm.assert_frame_equal(result, exp)
result = df.rank(1)
exp = df.astype(float).rank(1)
tm.assert_frame_equal(result, exp)
def test_rank2(self):
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
result = df.rank(1, pct=True)
tm.assert_frame_equal(result, expected)
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = df.rank(0) / 2.0
result = df.rank(0, pct=True)
tm.assert_frame_equal(result, expected)
df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])
expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
result = df.rank(1, numeric_only=False)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
result = df.rank(0, numeric_only=False)
tm.assert_frame_equal(result, expected)
df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])
expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]])
result = df.rank(1, numeric_only=False)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]])
result = df.rank(0, numeric_only=False)
tm.assert_frame_equal(result, expected)
# f7u12, this does not work without extensive workaround
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check the rank
expected = DataFrame([[2., nan, 1.],
[2., 3., 1.]])
result = df.rank(1, numeric_only=False, ascending=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame([[1., nan, 2.],
[2., 1., 3.]])
result = df.rank(1, numeric_only=False, ascending=False)
tm.assert_frame_equal(result, expected)
# mixed-type frames
self.mixed_frame['datetime'] = datetime.now()
self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)
result = self.mixed_frame.rank(1)
expected = self.mixed_frame.rank(1, numeric_only=True)
tm.assert_frame_equal(result, expected)
df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10,
1e60, 1e80, 1e-30]})
exp = DataFrame({"a": [3.5, 1., 3.5, 5., 6., 7., 2.]})
tm.assert_frame_equal(df.rank(), exp)
def test_rank_na_option(self):
rankdata = pytest.importorskip('scipy.stats.rankdata')
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
self.frame['C'][::4] = np.nan
self.frame['D'][::5] = np.nan
# bottom
ranks0 = self.frame.rank(na_option='bottom')
ranks1 = self.frame.rank(1, na_option='bottom')
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp1 = np.apply_along_axis(rankdata, 1, fvals)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# top
ranks0 = self.frame.rank(na_option='top')
ranks1 = self.frame.rank(1, na_option='top')
fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
fval1 = self.frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fval0)
exp1 = np.apply_along_axis(rankdata, 1, fval1)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# descending
# bottom
ranks0 = self.frame.rank(na_option='top', ascending=False)
ranks1 = self.frame.rank(1, na_option='top', ascending=False)
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fvals)
exp1 = np.apply_along_axis(rankdata, 1, -fvals)
tm.assert_almost_equal(ranks0.values, exp0)
tm.assert_almost_equal(ranks1.values, exp1)
# descending
# top
ranks0 = self.frame.rank(na_option='bottom', ascending=False)
ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)
fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
fval1 = self.frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fval0)
exp1 = np.apply_along_axis(rankdata, 1, -fval1)
tm.assert_numpy_array_equal(ranks0.values, exp0)
tm.assert_numpy_array_equal(ranks1.values, exp1)
def test_rank_axis(self):
# check if using axes' names gives the same result
df = DataFrame([[2, 1], [4, 3]])
tm.assert_frame_equal(df.rank(axis=0), df.rank(axis='index'))
tm.assert_frame_equal(df.rank(axis=1), df.rank(axis='columns'))
def test_rank_methods_frame(self):
pytest.importorskip('scipy.stats.special')
rankdata = pytest.importorskip('scipy.stats.rankdata')
import scipy
xs = np.random.randint(0, 21, (100, 26))
xs = (xs - 10.0) / 10.0
cols = [chr(ord('z') - i) for i in range(xs.shape[1])]
for vals in [xs, xs + 1e6, xs * 1e-6]:
df = DataFrame(vals, columns=cols)
for ax in [0, 1]:
for m in ['average', 'min', 'max', 'first', 'dense']:
result = df.rank(axis=ax, method=m)
sprank = np.apply_along_axis(
rankdata, ax, vals,
m if m != 'first' else 'ordinal')
sprank = sprank.astype(np.float64)
expected = DataFrame(sprank, columns=cols)
if (LooseVersion(scipy.__version__) >=
LooseVersion('0.17.0')):
expected = expected.astype('float64')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('dtype', ['O', 'f8', 'i8'])
def test_rank_descending(self, method, dtype):
if 'i' in dtype:
df = self.df.dropna()
else:
df = self.df.astype(dtype)
res = df.rank(ascending=False)
expected = (df.max() - df).rank()
assert_frame_equal(res, expected)
if method == 'first' and dtype == 'O':
return
expected = (df.max() - df).rank(method=method)
if dtype != 'O':
res2 = df.rank(method=method, ascending=False,
numeric_only=True)
assert_frame_equal(res2, expected)
res3 = df.rank(method=method, ascending=False,
numeric_only=False)
assert_frame_equal(res3, expected)
@pytest.mark.parametrize('axis', [0, 1])
@pytest.mark.parametrize('dtype', [None, object])
def test_rank_2d_tie_methods(self, method, axis, dtype):
df = self.df
def _check2d(df, expected, method='average', axis=0):
exp_df = DataFrame({'A': expected, 'B': expected})
if axis == 1:
df = df.T
exp_df = exp_df.T
result = df.rank(method=method, axis=axis)
assert_frame_equal(result, exp_df)
disabled = set([(object, 'first')])
if (dtype, method) in disabled:
return
frame = df if dtype is None else df.astype(dtype)
_check2d(frame, self.results[method], method=method, axis=axis)
@pytest.mark.parametrize(
"method,exp", [("dense",
[[1., 1., 1.],
[1., 0.5, 2. / 3],
[1., 0.5, 1. / 3]]),
("min",
[[1. / 3, 1., 1.],
[1. / 3, 1. / 3, 2. / 3],
[1. / 3, 1. / 3, 1. / 3]]),
("max",
[[1., 1., 1.],
[1., 2. / 3, 2. / 3],
[1., 2. / 3, 1. / 3]]),
("average",
[[2. / 3, 1., 1.],
[2. / 3, 0.5, 2. / 3],
[2. / 3, 0.5, 1. / 3]]),
("first",
[[1. / 3, 1., 1.],
[2. / 3, 1. / 3, 2. / 3],
[3. / 3, 2. / 3, 1. / 3]])])
def test_rank_pct_true(self, method, exp):
# see gh-15630.
df = DataFrame([[2012, 66, 3], [2012, 65, 2], [2012, 65, 1]])
result = df.rank(method=method, pct=True)
expected = DataFrame(exp)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
andreiapostoae/dota2-predictor | visualizing/hero_map.py | 2 | 13950 | import collections
import logging
import math
import random
import numpy as np
import pandas as pd
import plotly.plotly as py
import plotly.graph_objs as go
import tensorflow as tf
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from tools.metadata import get_hero_dict, get_last_patch
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
data_index = 0
def _build_vocabulary(words, vocabulary_size):
""" Creates a dictionary representing a vocabulary and counts the appearances of each word
    In this context, each word is a hero's index cast to a string, e.g. Anti-Mage -> "1"
Args:
words: list of strings representing the corpus
vocabulary_size: number of words to be evaluated (the vocabulary will contain only this
number of words, even if there are more unique words in the corpus)
Returns:
data: list of indices obtained by mapping the words' indices to the corpus
count: list of [word, appearances] for the corpus
        dictionary: the vocabulary (each key is a word and each value is its integer index)
        reverse_dictionary: the reversed vocabulary (the keys are the integer indices and the
                            values are the words)
"""
# create dictionary with the most common heroes
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
# the word is unknown
index = 0
unk_count = unk_count + 1
data.append(index)
count[0][1] = unk_count
# save the dictionary's reversed version for later usage
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
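# Illustrative sketch (hypothetical tiny corpus, not part of the original module):
# _build_vocabulary(['1', '2', '1', 'PAD'], vocabulary_size=4) would give
#   count      == [['UNK', 0], ('1', 2), ('2', 1), ('PAD', 1)]  (equal-count entries may vary in order)
#   dictionary == {'UNK': 0, '1': 1, '2': 2, 'PAD': 3}
#   data       == [1, 2, 1, 3]   # the corpus mapped to vocabulary indices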
def _generate_batch(data, batch_size, num_skips, window_size):
""" Generates a batch of data to be used in training using the skip-gram flavor of word2vec
Args:
data: list of indices obtained by mapping the words' indices to the corpus
batch_size: number of samples to be used in a batch
num_skips: number of skips hyperparameter of word2vec
window_size: window size hyperparameter of word2vec
Returns:
batch: batch of data to be used in training
labels: labels of each sample in the batch
"""
global data_index
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * window_size + 1
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
# target label at the center of the buffer
target = window_size
targets_to_avoid = [window_size]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[window_size]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
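# Illustrative sketch (hypothetical values, not part of the original module): with
# data starting [5, 3, 7, 2, ...], window_size=1 and num_skips=2, the first buffer
# is [5, 3, 7], so the first two (batch, label) pairs both use the centre word 3,
# paired with its two context words 5 and 7 (picked in random order).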
def _train_word2vec(data,
batch_size,
vocabulary_size,
embedding_size,
neg_samples,
window_size,
num_steps,
reverse_dictionary,
heroes_dict):
""" Given input data and hyperparameters, train the dataset of games using word2vec with
skip-gram flavor
Args:
data: list of indices obtained by mapping the words' indices to the corpus
batch_size: number of samples to be used in a batch
vocabulary_size: number of words to be evaluated (the vocabulary will contain only this
number of words, even if there are more unique words in the corpus)
embedding_size: number of dimensions when creating word embeddings
neg_samples: word2vec negative samples hyperparameter
window_size: word2vec window size hyperparameter
        num_steps: number of steps to train for (at least 10k should be fine)
        reverse_dictionary: the reversed vocabulary (the keys are the integer indices and the
                            values are the words)
heroes_dict: dictionary that maps the hero's ID to its name
Returns:
final_embeddings: np.array of (samples, embedding_size) dimension corresponding to each
hero's embeddings
"""
valid_size = 15
valid_examples = np.random.randint(0, vocabulary_size, valid_size)
graph = tf.Graph()
with graph.as_default(), tf.device('/cpu:0'):
train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.float32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
softmax_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(softmax_weights,
softmax_biases,
train_labels,
embed,
neg_samples,
vocabulary_size))
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
with tf.Session(graph=graph) as session:
session.run(tf.global_variables_initializer())
logger.info('Initialized graph')
average_loss = 0
for step in range(num_steps):
batch_data, batch_labels = _generate_batch(data,
batch_size,
2 * window_size,
window_size)
feed_dict = {train_dataset: batch_data, train_labels: batch_labels}
_, new_loss = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += new_loss
# print the loss every 2k steps
if step % 2000 == 0:
if step > 0:
average_loss = average_loss / 2000
logger.info('Average loss at step %d: %f', step, average_loss)
average_loss = 0
# print a sample of similarities between heroes every 10k steps
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
# ignore unknown and padding tokens
if valid_word != 'UNK' and valid_word != 'PAD':
valid_word = heroes_dict[int(reverse_dictionary[valid_examples[i]])]
top_k = 8 # number of nearest neighbors to print
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
index = reverse_dictionary[nearest[k]]
if index != 'UNK' and index != 'PAD':
close_word = heroes_dict[int(index)]
else:
close_word = index
log = '%s %s,' % (log, close_word)
logger.info(log)
final_embeddings = normalized_embeddings.eval()
return final_embeddings
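# Note for downstream use (an observation, not original code): the rows of
# final_embeddings are L2-normalised, so the cosine similarity between heroes i
# and j reduces to a dot product, e.g. np.dot(final_embeddings[i], final_embeddings[j]).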
def _plot_similarities(embeddings, heroes_dict, reverse_dictionary, perplexity=20):
""" Plot the obtained hero embeddings using TSNE algorithm in 2D space.
There are 4 assumed roles: Mid, Carry, Offlaner, Support, each category containing a
representative hardcoded hero in order to correctly identify each cluster's role.
Args:
embeddings: hero embeddings obtained after training
heroes_dict: dictionary that maps the hero's ID to its name
        reverse_dictionary: the reversed vocabulary (the keys are the integer indices and the
                            values are the words)
perplexity: hyperparameter of TSNE (15-30 seems to work best)
"""
# Reduce the embeddings to 2D
tsne = TSNE(n_components=2, perplexity=perplexity, random_state=42)
two_d_embeddings = tsne.fit_transform(embeddings)
# Apply KMeans on the data in order to clusterize by role
kmeans = KMeans(n_clusters=4, n_jobs=-1)
kmeans.fit(tsne.embedding_)
labels = kmeans.labels_
labels = labels[2:]
x_vals = two_d_embeddings[:, 0]
y_vals = two_d_embeddings[:, 1]
number_of_heroes = len(heroes_dict.keys())
names = number_of_heroes * ['']
for i in range(number_of_heroes):
names[i] = heroes_dict[int(reverse_dictionary[i + 2])]
x_vals = list(x_vals)
y_vals = list(y_vals)
# delete 'UNK' and 'PAD' when plotting
del x_vals[1]
del x_vals[0]
del y_vals[1]
del y_vals[0]
traces = []
for cluster in range(max(labels) + 1):
indices = []
for i in range(len(labels)):
if labels[i] == cluster:
indices.append(i)
cluster_text = 'Mixed'
heroes_in_cluster = [names[i] for i in indices]
if 'Terrorblade' in heroes_in_cluster:
cluster_text = 'Carry'
elif 'Shadow Fiend' in heroes_in_cluster:
cluster_text = 'Mid'
elif 'Batrider' in heroes_in_cluster:
cluster_text = 'Offlane'
elif 'Dazzle' in heroes_in_cluster:
cluster_text = 'Support'
trace = go.Scatter(
x=[x_vals[i] for i in indices],
y=[y_vals[i] for i in indices],
mode='markers+text',
text=[names[i] for i in indices],
name=cluster_text,
textposition='top'
)
traces.append(trace)
layout = go.Layout(
title='Hero map test',
xaxis=dict(
autorange=True,
showgrid=False,
zeroline=False,
showline=False,
autotick=True,
ticks='',
showticklabels=False
),
yaxis=dict(
autorange=True,
showgrid=False,
zeroline=False,
showline=False,
autotick=True,
ticks='',
showticklabels=False
)
)
data = traces
figure = go.Figure(data=data, layout=layout)
py.iplot(figure, filename='heromap')
def plot_hero_map(csv_path,
batch_size=128,
embedding_size=25,
window_size=2,
neg_samples=64,
num_steps=30001,
low_mmr=0,
high_mmr=9000):
""" Creates a 2D plot of the heroes based on their similarity obtained with word2vec. The result
is uploaded to plotly.
Args:
csv_path: path to the training dataset csv
batch_size: size of the batch to be used in training
embedding_size: number of dimensions when creating word embeddings
window_size: word2vec window size hyperparameter
neg_samples: word2vec negative samples hyperparameter
        num_steps: number of steps to train for (at least 10k should be fine)
low_mmr: lower bound of the MMRs filtered for plotting
high_mmr: upper bound of the MMRs filtered for plotting
"""
patch = get_last_patch()
heroes_dict = get_hero_dict()
dataset = pd.read_csv(csv_path)
# filter the games by MMR and transform the dataset to a numpy array
dataset = dataset[(dataset.avg_mmr > low_mmr) & (dataset.avg_mmr < high_mmr)].values
vocabulary_size = patch['heroes_released'] + 1
words = []
# create corpus by separating each team and adding padding
for match in dataset:
radiant_list = match[2].split(',')
dire_list = match[3].split(',')
words.extend(radiant_list)
words.append('PAD')
words.append('PAD')
words.extend(dire_list)
words.append('PAD')
words.append('PAD')
# create vocabulary using the corpus
data, count, dictionary, reverse_dictionary = _build_vocabulary(words, vocabulary_size)
logger.info('Most common heroes (+UNK): %s', count[:5])
logger.info('Sample data: %s', data[:10])
# free unused memory
del words
final_embeddings = _train_word2vec(data,
batch_size,
vocabulary_size,
embedding_size,
neg_samples,
window_size,
num_steps,
reverse_dictionary,
heroes_dict)
_plot_similarities(final_embeddings, heroes_dict, reverse_dictionary)
| mit |
berquist/pyquante2 | tests/pyq1_comparison.py | 3 | 8542 | # This is a simplified version of the PyQuante1 and pyquante2 dft loops to work out
# the bugs when implementing pyquante2.
import numpy as np
def pyq1_rohf(atomtuples=[(2,(0,0,0))],basis = '6-31G**',maxit=10,mult=3):
from PyQuante import Ints,settings,Molecule
from PyQuante.hartree_fock import get_energy
from PyQuante.MG2 import MG2 as MolecularGrid
from PyQuante.LA2 import mkdens,geigh,trace2,simx
from PyQuante.Ints import getJ,getK
print ("PyQ1 ROHF run")
atoms = Molecule('Pyq1',atomlist=atomtuples,multiplicity=mult)
bfs = Ints.getbasis(atoms,basis=basis)
S,h,I2e = Ints.getints(bfs,atoms)
nbf = norbs = len(bfs)
nel = atoms.get_nel()
nalpha,nbeta = atoms.get_alphabeta()
enuke = atoms.get_enuke()
orbe,orbs = geigh(h,S)
eold = 0
for i in range(maxit):
Da = mkdens(orbs,0,nalpha)
Db = mkdens(orbs,0,nbeta)
Ja = getJ(I2e,Da)
Jb = getJ(I2e,Db)
Ka = getK(I2e,Da)
Kb = getK(I2e,Db)
Fa = h+Ja+Jb-Ka
Fb = h+Ja+Jb-Kb
energya = get_energy(h,Fa,Da)
energyb = get_energy(h,Fb,Db)
eone = (trace2(Da,h) + trace2(Db,h))/2
etwo = (trace2(Da,Fa) + trace2(Db,Fb))/2
energy = (energya+energyb)/2 + enuke
print (i,energy,eone,etwo,enuke)
if abs(energy-eold) < 1e-5: break
eold = energy
Fa = simx(Fa,orbs)
Fb = simx(Fb,orbs)
# Building the approximate Fock matrices in the MO basis
F = 0.5*(Fa+Fb)
K = Fb-Fa
# The Fock matrix now looks like
# F-K | F + K/2 | F
# ---------------------------------
# F + K/2 | F | F - K/2
# ---------------------------------
# F | F - K/2 | F + K
# Make explicit slice objects to simplify this
do = slice(0,nbeta)
so = slice(nbeta,nalpha)
uo = slice(nalpha,norbs)
F[do,do] -= K[do,do]
F[uo,uo] += K[uo,uo]
F[do,so] += 0.5*K[do,so]
F[so,do] += 0.5*K[so,do]
F[so,uo] -= 0.5*K[so,uo]
F[uo,so] -= 0.5*K[uo,so]
orbe,mo_orbs = np.linalg.eigh(F)
orbs = np.dot(orbs,mo_orbs)
return energy,orbe,orbs
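# A small, self-contained restatement of the open-shell coupling block above
# (the "F-K | F+K/2 | ..." comment). The toy usage below is illustrative only;
# the slice arithmetic is exactly the same as in pyq1_rohf.
def _rohf_effective_fock(Fa, Fb, nalpha, nbeta):
    norbs = Fa.shape[0]
    F = 0.5*(Fa+Fb)
    K = Fb-Fa
    do = slice(0, nbeta)       # doubly occupied block
    so = slice(nbeta, nalpha)  # singly occupied block
    uo = slice(nalpha, norbs)  # unoccupied (virtual) block
    F[do,do] -= K[do,do]
    F[uo,uo] += K[uo,uo]
    F[do,so] += 0.5*K[do,so]
    F[so,do] += 0.5*K[so,do]
    F[so,uo] -= 0.5*K[so,uo]
    F[uo,so] -= 0.5*K[uo,so]
    return F
# Example with symmetric random stand-ins (the result stays symmetric because K is symmetric):
#   A = np.random.rand(6,6); Fa = A+A.T
#   B = np.random.rand(6,6); Fb = B+B.T
#   F = _rohf_effective_fock(Fa, Fb, nalpha=4, nbeta=2)   # np.allclose(F, F.T)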
def pyq1_dft(atomtuples=[(2,(0,0,0))],basis = '6-31G**',maxit=10,
xcname='SVWN'):
from PyQuante import Ints,settings,Molecule
from PyQuante.dft import getXC
from PyQuante.MG2 import MG2 as MolecularGrid
from PyQuante.LA2 import mkdens,geigh,trace2
from PyQuante.Ints import getJ
print ("PyQ1 DFT run")
atoms = Molecule('Pyq1',atomlist=atomtuples)
bfs = Ints.getbasis(atoms,basis=basis)
S,h,Ints = Ints.getints(bfs,atoms)
    nel = atoms.get_nel()
    nclosed,nopen = nel//2,nel%2
assert nopen==0
enuke = atoms.get_enuke()
grid_nrad = settings.DFTGridRadii
grid_fineness = settings.DFTGridFineness
gr = MolecularGrid(atoms,grid_nrad,grid_fineness)
gr.set_bf_amps(bfs)
orbe,orbs = geigh(h,S)
eold = 0
for i in range(maxit):
D = mkdens(orbs,0,nclosed)
gr.setdens(D)
J = getJ(Ints,D)
Exc,Vxc = getXC(gr,nel,functional=xcname)
F = h+2*J+Vxc
orbe,orbs = geigh(F,S)
Ej = 2*trace2(D,J)
Eone = 2*trace2(D,h)
energy = Eone + Ej + Exc + enuke
print (i,energy,Eone,Ej,Exc,enuke)
if np.isclose(energy,eold):
break
eold = energy
return energy
def func_compare():
import matplotlib.pyplot as plt
import pyquante2 as pyq2
from PyQuante.DFunctionals import cvwn
ns = np.linspace(0.,100)
c2 = pyq2.dft.functionals.cvwn5(ns,ns)
fc1 = [cvwn(n,n)[0] for n in ns]
dfc1 = [cvwn(n,n)[1] for n in ns]
plt.plot(ns,c2[0],label='f_vwn5/pyq2')#,marker='o',linestyle='None')
plt.plot(ns,fc1,label='f_vwn5/pyq1')
plt.plot(ns,c2[1],label='df_vwn5/pyq2')#,marker='o',linestyle='None')
plt.plot(ns,dfc1,label='df_vwn5/pyq1')
plt.show()
def pyq2_rohf(atomtuples=[(2,0,0,0)],basis = '6-31G**',maxit=10,xcname='svwn',
mult=3):
import pyquante2 as pyq2
print ("pyq2 ROHF run")
geo = pyq2.molecule(atomtuples,multiplicity=mult)
bfs = pyq2.basisset(geo,name=basis)
i1 = pyq2.onee_integrals(bfs,geo)
i2 = pyq2.twoe_integrals(bfs)
h = i1.T + i1.V
orbe,orbs = pyq2.geigh(h,i1.S)
eold = 0
E0 = geo.nuclear_repulsion()
nalpha,nbeta = geo.nup(),geo.ndown()
norbs = len(bfs)
for i in range(maxit):
Da = pyq2.dmat(orbs,nalpha)
Db = pyq2.dmat(orbs,nbeta)
E1 = 0.5*pyq2.trace2(Da+Db,h)
Ja,Ka = i2.get_j(Da),i2.get_k(Da)
Jb,Kb = i2.get_j(Db),i2.get_k(Db)
Fa = h + Ja + Jb - Ka
Fb = h + Ja + Jb - Kb
E2 = 0.5*(pyq2.trace2(Fa,Da)+pyq2.trace2(Fb,Db))
energy = E0+E1+E2
print (energy,E1,E2,E0)
Fa = pyq2.utils.simx(Fa,orbs)
Fb = pyq2.utils.simx(Fb,orbs)
F = 0.5*(Fa+Fb)
K = Fb-Fa
# Make explicit slice objects to simplify this
do = slice(0,nbeta)
so = slice(nbeta,nalpha)
uo = slice(nalpha,norbs)
F[do,do] -= K[do,do]
F[uo,uo] += K[uo,uo]
F[do,so] += 0.5*K[do,so]
F[so,do] += 0.5*K[so,do]
F[so,uo] -= 0.5*K[so,uo]
F[uo,so] -= 0.5*K[uo,so]
E,cmo = np.linalg.eigh(F)
orbs = np.dot(orbs,cmo)
return
def pyq2_dft(atomtuples=[(2,0,0,0)],basis = '6-31G**',maxit=10,xcname='svwn'):
import pyquante2 as pyq2
print ("pyq2 DFT run")
geo = pyq2.molecule(atomtuples)
bfs = pyq2.basisset(geo,name=basis)
i1 = pyq2.onee_integrals(bfs,geo)
i2 = pyq2.twoe_integrals(bfs)
grid = pyq2.grid(geo)
h = i1.T + i1.V
orbe,orbs = pyq2.geigh(h,i1.S)
eold = 0
grid.setbfamps(bfs)
E0 = geo.nuclear_repulsion()
for i in range(maxit):
D = pyq2.dmat(orbs,geo.nocc())
E1 = 2*pyq2.trace2(h,D)
J = i2.get_j(D)
Ej = 2*pyq2.trace2(J,D)
Exc,Vxc = pyq2.get_xc(grid,0.5*D,xcname=xcname)
energy = E0+E1+Ej+Exc
F = h+2*J+Vxc
orbe,orbs = pyq2.geigh(F,i1.S)
print (i,energy,E1,Ej,Exc,E0)
if np.isclose(energy,eold):
break
eold = energy
return energy
if __name__ == '__main__':
pyq1_rohf()
pyq2_rohf()
#pyq1_dft()
#pyq2_dft()
#func_compare()
| bsd-3-clause |
zhuangjun1981/retinotopic_mapping | retinotopic_mapping/test/test_DisplayStimulus.py | 1 | 2314 | import os
import unittest
import retinotopic_mapping.DisplayStimulus as ds
curr_folder = os.path.dirname(os.path.realpath(__file__))
os.chdir(curr_folder)
class TestSimulation(unittest.TestCase):
def setUp(self):
import retinotopic_mapping.MonitorSetup as ms
# Setup monitor/indicator objects
self.monitor = ms.Monitor(resolution=(1200, 1600), dis=15.,
mon_width_cm=40., mon_height_cm=30.,
C2T_cm=15., C2A_cm=20., center_coordinates=(0., 60.),
downsample_rate=10)
# import matplotlib.pyplot as plt
# self.monitor.plot_map()
# plt.show()
self.indicator = ms.Indicator(self.monitor, width_cm=3., height_cm=3., position='northeast',
is_sync=True, freq=1.)
def test_initial_background(self):
import retinotopic_mapping.StimulusRoutines as stim
log_dir = os.path.join(curr_folder, 'test_data')
displayer = ds.DisplaySequence(log_dir=log_dir, backupdir=None, identifier='TEST', display_iter=1,
mouse_id='MOUSE', user_id='USER', psychopy_mon='testMonitor',
is_by_index=True, is_interpolate=False, is_triggered=False,
is_save_sequence=False, trigger_event="negative_edge",
trigger_NI_dev='Dev1', trigger_NI_port=1, trigger_NI_line=0,
is_sync_pulse=False, sync_pulse_NI_dev='Dev1', sync_pulse_NI_port=1,
sync_pulse_NI_line=1, display_screen=0, initial_background_color=0.,
color_weights=(0., 1., 1.))
# print(displayer.initial_background_color)
uc = stim.UniformContrast(monitor=self.monitor, indicator=self.indicator, pregap_dur=0.1,
postgap_dur=0.1, coordinate='degree',
background=0., duration=0.1, color=0.8)
displayer.set_stim(uc)
log_path = displayer.trigger_display()
import shutil
log_dir = os.path.join(curr_folder, 'test_data', 'visual_display_log')
shutil.rmtree(log_dir)
| gpl-3.0 |
adolfocorreia/portfolio | retriever/fund.py | 1 | 1691 | import glob
import re
import pandas as pd
from .retriever import ValueRetriever
class FundRetriever(ValueRetriever):
_regex = re.compile(r"(\.|/|-)")
def __init__(self):
ValueRetriever.__init__(self, "fund")
def _get_data_file_patterns(self):
return [self.data_directory + "/" + FundRetriever._regex.sub('', code)
+ "_%s.csv" for code in self.codes]
def _available_codes(self):
return [FundRetriever._regex.sub('', code) for code in self.codes]
def _load_data_files(self):
print "Loading Fund CSV files..."
self._data = {}
names = [
"Dia",
"Quota",
"CaptacaoDia",
"ResgateDia",
"Patrimonio",
"TotalCarteira",
"NumCotistas",
"ProxData",
]
file_list = sorted(glob.glob(
self.data_directory + "/??????????????_????.csv"))
for file_name in file_list:
print "Loading file %s..." % file_name
fund_cnpj = file_name.split('/')[-1][:14]
if fund_cnpj not in self._data:
self._data[fund_cnpj] = pd.DataFrame()
df = pd.read_csv(
file_name,
names=names,
header=0,
skiprows=1,
parse_dates=['Dia'],
index_col=0
)
self._data[fund_cnpj] = self._data[fund_cnpj].append(df)
def get_value(self, code, date):
ValueRetriever.get_value(self, code, date)
ts = pd.Timestamp(date)
asof_ts = self._data[code].index.asof(ts)
return self._data[code].ix[asof_ts].Quota
| gpl-2.0 |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py | 18 | 26105 | """
An experimental support for curvilinear grid.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from itertools import chain
from .grid_finder import GridFinder
from .axislines import AxisArtistHelper, GridHelperBase
from .axis_artist import AxisArtist
from matplotlib.transforms import Affine2D, IdentityTransform
import numpy as np
from matplotlib.path import Path
class FixedAxisArtistHelper(AxisArtistHelper.Fixed):
"""
Helper class for a fixed axis.
"""
def __init__(self, grid_helper, side, nth_coord_ticks=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FixedAxisArtistHelper, self).__init__( \
loc=side,
)
self.grid_helper = grid_helper
if nth_coord_ticks is None:
nth_coord_ticks = self.nth_coord
self.nth_coord_ticks = nth_coord_ticks
self.side = side
self._limits_inverted = False
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
if self.nth_coord == 0:
xy1, xy2 = axes.get_ylim()
else:
xy1, xy2 = axes.get_xlim()
if xy1 > xy2:
self._limits_inverted = True
else:
self._limits_inverted = False
def change_tick_coord(self, coord_number=None):
if coord_number is None:
self.nth_coord_ticks = 1 - self.nth_coord_ticks
elif coord_number in [0, 1]:
self.nth_coord_ticks = coord_number
else:
raise Exception("wrong coord number")
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
g = self.grid_helper
if self._limits_inverted:
side = {"left":"right","right":"left",
"top":"bottom", "bottom":"top"}[self.side]
else:
side = self.side
ti1 = g.get_tick_iterator(self.nth_coord_ticks, side)
ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, side, minor=True)
#ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
return chain(ti1, ti2), iter([])
class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FloatingAxisArtistHelper, self).__init__(nth_coord,
value,
)
self.value = value
self.grid_helper = grid_helper
self._extremes = None, None
self._get_line_path = None # a method that returns a Path.
self._line_num_points = 100 # number of points to create a line
def set_extremes(self, e1, e2):
self._extremes = e1, e2
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
grid_finder = self.grid_helper.grid_finder
extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
x1, y1, x2, y2)
extremes = list(extremes)
e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
if e1 is not None:
extremes[2] = max(e1, extremes[2])
if e2 is not None:
extremes[3] = min(e2, extremes[3])
elif self.nth_coord == 1:
if e1 is not None:
extremes[0] = max(e1, extremes[0])
if e2 is not None:
extremes[1] = min(e2, extremes[1])
grid_info = dict()
lon_min, lon_max, lat_min, lat_max = extremes
lon_levs, lon_n, lon_factor = \
grid_finder.grid_locator1(lon_min, lon_max)
lat_levs, lat_n, lat_factor = \
grid_finder.grid_locator2(lat_min, lat_max)
grid_info["extremes"] = extremes
grid_info["lon_info"] = lon_levs, lon_n, lon_factor
grid_info["lat_info"] = lat_levs, lat_n, lat_factor
grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
lon_factor,
lon_levs)
grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
lat_factor,
lat_levs)
grid_finder = self.grid_helper.grid_finder
#e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
xx0 = np.linspace(self.value, self.value, self._line_num_points)
yy0 = np.linspace(extremes[2], extremes[3], self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
elif self.nth_coord == 1:
xx0 = np.linspace(extremes[0], extremes[1], self._line_num_points)
yy0 = np.linspace(self.value, self.value, self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
grid_info["line_xy"] = xx, yy
self.grid_info = grid_info
def get_axislabel_transform(self, axes):
return Affine2D() #axes.transData
def get_axislabel_pos_angle(self, axes):
extremes = self.grid_info["extremes"]
if self.nth_coord == 0:
xx0 = self.value
yy0 = (extremes[2]+extremes[3])/2.
dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
elif self.nth_coord == 1:
xx0 = (extremes[0]+extremes[1])/2.
yy0 = self.value
dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
grid_finder = self.grid_helper.grid_finder
xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
else:
return None, None
def get_tick_transform(self, axes):
return IdentityTransform() #axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label, (optionally) tick_label"""
grid_finder = self.grid_helper.grid_finder
lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
lat_levs = np.asarray(lat_levs)
if lat_factor is not None:
yy0 = lat_levs / lat_factor
dy = 0.01 / lat_factor
else:
yy0 = lat_levs
dy = 0.01
lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
lon_levs = np.asarray(lon_levs)
if lon_factor is not None:
xx0 = lon_levs / lon_factor
dx = 0.01 / lon_factor
else:
xx0 = lon_levs
dx = 0.01
if None in self._extremes:
e0, e1 = self._extremes
else:
e0, e1 = sorted(self._extremes)
if e0 is None:
e0 = -np.inf
if e1 is None:
e1 = np.inf
if self.nth_coord == 0:
mask = (e0 <= yy0) & (yy0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
yy0 = yy0[mask]
elif self.nth_coord == 1:
mask = (e0 <= xx0) & (xx0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
xx0 = xx0[mask]
def transform_xy(x, y):
x1, y1 = grid_finder.transform_xy(x, y)
x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
x2, y2 = x2y2.transpose()
return x2, y2
# find angles
if self.nth_coord == 0:
xx0 = np.empty_like(yy0)
xx0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx1a, yy1a = transform_xy(xx00, yy0)
xx1b, yy1b = transform_xy(xx00+dx, yy0)
xx2a, yy2a = transform_xy(xx0, yy0)
xx2b, yy2b = transform_xy(xx0, yy0+dy)
labels = self.grid_info["lat_labels"]
labels = [l for l, m in zip(labels, mask) if m]
elif self.nth_coord == 1:
yy0 = np.empty_like(xx0)
yy0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx1a, yy1a = transform_xy(xx0, yy0)
xx1b, yy1b = transform_xy(xx0, yy0+dy)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx2a, yy2a = transform_xy(xx00, yy0)
xx2b, yy2b = transform_xy(xx00+dx, yy0)
labels = self.grid_info["lon_labels"]
labels = [l for l, m in zip(labels, mask) if m]
def f1():
dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.) # mask where dd1 is not defined
dd[mm] = dd2[mm]+3.14159/2.
#dd = np.arctan2(yy2-yy1, xx2-xx1) # angle normal
#dd2 = np.arctan2(yy3-yy1, xx3-xx1) # angle tangent
#mm = ((yy2-yy1)==0.) & ((xx2-xx1)==0.) # mask where dd1 is not defined
#dd[mm] = dd2[mm]+3.14159/2.
#dd += 3.14159
#dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
c2 = tr2ax.transform_point((x, y))
delta=0.00001
if (0. -delta<= c2[0] <= 1.+delta) and \
(0. -delta<= c2[1] <= 1.+delta):
d1 = d/3.14159*180.
d2 = d2/3.14159*180.
yield [x, y], d1, d2, lab
return f1(), iter([])
def get_line_transform(self, axes):
return axes.transData
def get_line(self, axes):
self.update_lim(axes)
x, y = self.grid_info["line_xy"]
if self._get_line_path is None:
return Path(list(zip(x, y)))
else:
return self._get_line_path(axes, x, y)
class GridHelperCurveLinear(GridHelperBase):
def __init__(self, aux_trans,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
aux_trans : a transform from the source (curved) coordinate to
target (rectilinear) coordinate. An instance of MPL's Transform
(inverse transform should be defined) or a tuple of two callable
        objects which define the transform and its inverse. The callables
        need to take two arguments (arrays of source coordinates) and
        should return the two target coordinates:
e.g., x2, y2 = trans(x1, y1)
"""
super(GridHelperCurveLinear, self).__init__()
self.grid_info = None
self._old_values = None
#self._grid_params = dict()
self._aux_trans = aux_trans
self.grid_finder = GridFinder(aux_trans,
extreme_finder,
grid_locator1,
grid_locator2,
tick_formatter1,
tick_formatter2)
def update_grid_finder(self, aux_trans=None, **kw):
if aux_trans is not None:
self.grid_finder.update_transform(aux_trans)
self.grid_finder.update(**kw)
self.invalidate()
def _update(self, x1, x2, y1, y2):
"bbox in 0-based image coordinates"
# update wcsgrid
if self.valid() and self._old_values == (x1, x2, y1, y2):
return
self._update_grid(x1, y1, x2, y2)
self._old_values = (x1, x2, y1, y2)
self._force_update = False
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None):
if axes is None:
axes = self.axes
if axis_direction is None:
axis_direction = loc
_helper = FixedAxisArtistHelper(self, loc,
#nth_coord,
nth_coord_ticks=nth_coord,
)
axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
return axisline
def new_floating_axis(self, nth_coord,
value,
axes=None,
axis_direction="bottom"
):
if axes is None:
axes = self.axes
_helper = FloatingAxisArtistHelper( \
self, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
#_helper = FloatingAxisArtistHelper(self, nth_coord,
# value,
# label_direction=label_direction,
# )
#axisline = AxisArtistFloating(axes, _helper,
# axis_direction=axis_direction)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
#axisline.major_ticklabels.set_visible(True)
#axisline.minor_ticklabels.set_visible(False)
#axisline.major_ticklabels.set_rotate_along_line(True)
#axisline.set_rotate_label_along_line(True)
return axisline
def _update_grid(self, x1, y1, x2, y2):
self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
def get_gridlines(self, which="major", axis="both"):
grid_lines = []
if axis in ["both", "x"]:
for gl in self.grid_info["lon"]["lines"]:
grid_lines.extend(gl)
if axis in ["both", "y"]:
for gl in self.grid_info["lat"]["lines"]:
grid_lines.extend(gl)
return grid_lines
def get_tick_iterator(self, nth_coord, axis_side, minor=False):
#axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side]
angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side]
#angle = [0, 90, 180, 270][axisnr]
lon_or_lat = ["lon", "lat"][nth_coord]
if not minor: # major ticks
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, l
else:
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, ""
#for xy, a, l in self.grid_info[lon_or_lat]["ticks"][axis_side]:
# yield xy, a, ""
return f()
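def _tuple_aux_trans_sketch():
    """Minimal sketch only: per the GridHelperCurveLinear.__init__ docstring,
    aux_trans may also be given as a pair of plain callables (forward, inverse)
    instead of an mpl Transform. The shear used here is illustrative and mirrors
    MyTransform in test3 below.
    """
    def tr(x, y):       # source (curved) -> target (rectilinear)
        x, y = np.asarray(x), np.asarray(y)
        return x, y - x

    def inv_tr(x, y):   # target -> source
        x, y = np.asarray(x), np.asarray(y)
        return x, y + x

    return GridHelperCurveLinear((tr, inv_tr))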
def test3():
import numpy as np
from matplotlib.transforms import Transform
from matplotlib.path import Path
class MyTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
            Create a new transform. Resolution is the number of steps
            to interpolate between each input line segment to approximate its
            path in the curved target space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y-x), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MyTransformInv(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class MyTransformInv(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y+x), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MyTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
tr = MyTransform(1)
grid_helper = GridHelperCurveLinear(tr)
    from mpl_toolkits.axes_grid1.parasite_axes import (host_subplot_class_factory,
                                                       ParasiteAxesAuxTrans)
from .axislines import Axes
SubplotHost = host_subplot_class_factory(Axes)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
fig.add_subplot(ax1)
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
ax1.parasites.append(ax2)
ax2.plot([3, 6], [5.0, 10.])
ax1.set_aspect(1.)
ax1.set_xlim(0, 10)
ax1.set_ylim(0, 10)
ax1.grid(True)
plt.draw()
def curvelinear_test2(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost, \
ParasiteAxesAuxTrans
import matplotlib.cbook as cbook
    # PolarAxes.PolarTransform takes radians. However, we want our coordinate
    # system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(5)
    # Find grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also uses an appropriate formatter. Note that the
    # acceptable Locator and Formatter classes are a bit different from
    # mpl's, and you cannot directly use mpl's Locator and
    # Formatter here (but that may be possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
# let right axis shows ticklabels for 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
# let bottom axis shows ticklabels for 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat"] = axis = grid_helper.new_floating_axis(0, 60, axes=ax1)
axis.label.set_text("Test")
axis.label.set_visible(True)
#axis._extremes = 2, 10
#axis.label.set_text("Test")
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.get_helper()._extremes=2, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 6, axes=ax1)
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.label.set_text("Test 2")
axis.get_helper()._extremes=-180, 90
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
    # Anything you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 30]), 50),
intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
def curvelinear_test3(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1, axis
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
    # PolarAxes.PolarTransform takes radians. However, we want our coordinate
    # system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
    # Find grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also uses an appropriate formatter. Note that the
    # acceptable Locator and Formatter classes are a bit different from
    # mpl's, and you cannot directly use mpl's Locator and
    # Formatter here (but that may be possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
for axis in list(six.itervalues(ax1.axis)):
axis.set_visible(False)
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat1"] = axis = grid_helper.new_floating_axis(0, 130,
axes=ax1,
axis_direction="left"
)
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
grid_helper = ax1.get_grid_helper()
ax1.axis["lat2"] = axis = grid_helper.new_floating_axis(0, 50, axes=ax1,
axis_direction="right")
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 10,
axes=ax1,
axis_direction="bottom")
axis.label.set_text("Test 2")
axis.get_helper()._extremes= 50, 130
axis.major_ticklabels.set_axis_direction("top")
axis.label.set_axis_direction("top")
grid_helper.grid_finder.grid_locator1.den = 5
grid_helper.grid_finder.grid_locator2._nbins = 5
# # A parasite axes with given transform
# ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# # note that ax2.transData == tr + ax1.transData
    # # Anything you draw in ax2 will match the ticks and grids of ax1.
# ax1.parasites.append(ax2)
# intp = cbook.simple_linear_interpolation
# ax2.plot(intp(np.array([0, 30]), 50),
# intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(5, 5))
fig.clf()
#test3()
#curvelinear_test2(fig)
curvelinear_test3(fig)
#plt.draw()
plt.show()
| mit |
longyangking/ML | tensorflow/pdes/schrodinger.py | 1 | 1859 | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
## TODO: Absorbing Boundary Condition
def make_kernel(a):
a = np.asarray(a)
a = a.reshape(list(a.shape) + [1,1])
    return tf.constant(a, dtype=tf.float32)
def simple_conv(x,k):
x = tf.expand_dims(tf.expand_dims(x,0),-1)
y = tf.nn.depthwise_conv2d(x,k,[1,1,1,1],padding='SAME')
return y[0,:,:,0]
def laplace(x):
laplace_k = make_kernel([[0.0,1.0,0.0],
[1.0,-4.0,1.0],
[0.0,1.0,0.0]])
return simple_conv(x, laplace_k)
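# NumPy-only sanity sketch (not used below): the 5-point kernel above is the
# standard finite-difference Laplacian, so on f(x, y) = x**2 + y**2 with unit
# grid spacing it gives 4.0 everywhere away from the borders.
def _laplace_stencil_check():
    xs = np.arange(10.0)
    f = xs[:, None]**2 + xs[None, :]**2
    lap = (np.roll(f, 1, 0) + np.roll(f, -1, 0) +
           np.roll(f, 1, 1) + np.roll(f, -1, 1) - 4.0*f)
    return lap[1:-1, 1:-1]   # interior values are exactly 4.0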
a0 = 5.0e-3
V0 = 0
sess = tf.InteractiveSession()
N = 100
dx0 = 2.0/N
T0 = 1.0
dt0 = 0.9*dx0**2/2
n = int(T0/dt0)
fr_init = np.zeros((N,N)).astype('float32')
fi_init = np.zeros((N,N)).astype('float32')
fr_init[int(N/2),int(N/2)] = 1/np.square(2)
fi_init[int(N/2),int(N/2)] = 1/np.square(2)
dt = tf.placeholder(tf.float32,shape=())
dx = tf.placeholder(tf.float32,shape=())
a = tf.placeholder(tf.float32,shape=())
V = tf.placeholder(tf.float32,shape=())
fr = tf.Variable(fr_init)
fi = tf.Variable(fi_init)
fr_ = fr + dt*(a*laplace(fi)/dx/dx + V*fi)
fi_ = fi - dt*(a*laplace(fr)/dx/dx + V*fr)
step = tf.group(
# Core
fr[1:-1,1:-1].assign(fr_[1:-1,1:-1]),
fi[1:-1,1:-1].assign(fi_[1:-1,1:-1]),
    # Boundary
fr[0,:].assign(fr[1,:]),
fr[-1,:].assign(fr[-2,:]),
fr[:,0].assign(fr[:,1]),
fr[:,-1].assign(fr[:,-2]),
fi[0,:].assign(fi[1,:]),
fi[-1,:].assign(fi[-2,:]),
fi[:,0].assign(fi[:,1]),
fi[:,-1].assign(fi[:,-2])
)
tf.initialize_all_variables().run()
print('Starting... with steps {n}'.format(n=n))
for i in range(n):
step.run({a:a0,dt:dt0,dx:dx0,V:V0})
if i%1000==0:
print('Complete:{:.2f}%...'.format(i/n*100))
plt.imshow(fr.eval(),cmap='jet',interpolation="bicubic")
plt.xticks([])
plt.yticks([])
plt.show() | lgpl-3.0 |
ratnania/caid | examples/plasma_equilibrium_2d.py | 1 | 1939 | # -*- coding: UTF-8 -*-
#! /usr/bin/python
from matplotlib import pyplot as plt
import numpy as np
# ... creates miller equilibrium with default parameters
from caid.cad_geometry import miller_equilibrium
geo = miller_equilibrium(n=[31,31], p=[3,3])
geo.plotMesh(MeshResolution=3)
plt.savefig("plasma_equilibrium_2d_miller_ex1.png")
# ...
plt.clf()
# ... creates miller equilibrium
rmin=0.3 ; rmax=1.
params_shape = {}
params_shape['A'] = 3.17
params_shape['psi_tilde'] = 0.77
params_shape['kappa0'] = 1.66
params_shape['delta0'] = 0.416
params_shape['alpha'] = 1.22
params_eq = {}
params_eq['sk'] = 0.7
params_eq['sd'] = 1.37
params_eq['dR0'] =-0.354
params_eq['q'] = 3.03
params_eq['s'] = 2.47
from caid.cad_geometry import miller_equilibrium
geo = miller_equilibrium(rmin=rmin, rmax=rmax, n=[31,31], p=[3,3],
params_shape=params_shape, params_eq=params_eq)
geo.plotMesh(MeshResolution=3)
plt.savefig("plasma_equilibrium_2d_miller_ex2.png")
# ...
plt.clf()
# ... creates miller equilibrium with default parameters
from caid.cad_geometry import miller_equilibrium
geo = miller_equilibrium(rmin=0.4,n=[31,31], p=[3,3])
nrb = geo[0]
points = nrb.points
points[0,:,0] = points[0,:,0].mean()
points[0,:,1] = points[0,:,1].mean()
nrb.set_points(points)
geo.plotMesh(MeshResolution=3)
plt.savefig("plasma_equilibrium_2d_miller_ex3.png")
# ...
plt.clf()
# ... creates miller equilibrium
from caid.cad_geometry import miller_equilibrium
geo = miller_equilibrium(rmin=0.4, n=[31,31], p=[3,3])
# print boundaries info in order to know what is the internal boundary
geo[0].plotBoundariesInfo()
plt.savefig("plasma_equilibrium_2d_miller_ex4_info.png")
plt.clf()
# now that we know that the internal face is 1
# we can use it to split the geometry into 5 patchs
geo = geo.to5patchs(1)
geo.plotMesh(MeshResolution=3)
plt.savefig("plasma_equilibrium_2d_miller_ex4.png")
# ...
| mit |
deepmind/deepmind-research | curl/training.py | 1 | 45620 | ################################################################################
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Script to train CURL."""
import collections
import functools
from absl import logging
import numpy as np
from sklearn import neighbors
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
from curl import model
from curl import utils
tfc = tf.compat.v1
# pylint: disable=g-long-lambda
MainOps = collections.namedtuple('MainOps', [
'elbo', 'll', 'log_p_x', 'kl_y', 'kl_z', 'elbo_supervised', 'll_supervised',
'log_p_x_supervised', 'kl_y_supervised', 'kl_z_supervised',
'cat_probs', 'confusion', 'purity', 'latents'
])
DatasetTuple = collections.namedtuple('DatasetTuple', [
'train_data', 'train_iter_for_clf', 'train_data_for_clf',
'valid_iter', 'valid_data', 'test_iter', 'test_data', 'ds_info'
])
def compute_purity(confusion):
return np.sum(np.max(confusion, axis=0)).astype(float) / np.sum(confusion)
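# A tiny worked example of the purity metric above, with made-up numbers.
# Rows of the confusion matrix are true labels and columns are predicted
# clusters, matching tf.confusion_matrix(label, prediction) as used later on:
# for [[5, 1], [0, 4]] the per-cluster maxima are 5 and 4, so purity = 9/10.
_PURITY_EXAMPLE = np.array([[5, 1],
                            [0, 4]])
assert np.isclose(compute_purity(_PURITY_EXAMPLE), 0.9)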
def process_dataset(iterator,
ops_to_run,
sess,
feed_dict=None,
aggregation_ops=np.stack,
processing_ops=None):
"""Process a dataset by computing ops and accumulating batch by batch.
Args:
iterator: iterator through the dataset.
ops_to_run: dict, tf ops to run as part of dataset processing.
sess: tf.Session to use.
feed_dict: dict, required placeholders.
aggregation_ops: fn or dict of fns, aggregation op to apply for each op.
processing_ops: fn or dict of fns, extra processing op to apply for each op.
Returns:
Results accumulated over dataset.
"""
if not isinstance(ops_to_run, dict):
raise TypeError('ops_to_run must be specified as a dict')
if not isinstance(aggregation_ops, dict):
aggregation_ops = {k: aggregation_ops for k in ops_to_run}
if not isinstance(processing_ops, dict):
processing_ops = {k: processing_ops for k in ops_to_run}
out_results = collections.OrderedDict()
sess.run(iterator.initializer)
while True:
# Iterate over the whole dataset and append the results to a per-key list.
try:
outs = sess.run(ops_to_run, feed_dict=feed_dict)
for key, value in outs.items():
out_results.setdefault(key, []).append(value)
except tf.errors.OutOfRangeError: # end of dataset iterator
break
# Aggregate and process results.
for key, value in out_results.items():
if aggregation_ops[key]:
out_results[key] = aggregation_ops[key](value)
if processing_ops[key]:
out_results[key] = processing_ops[key](out_results[key], axis=0)
return out_results
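# Hypothetical usage sketch for process_dataset (names like `sess`, `test_iter`
# and `test_ops` are assumed to be the session, iterator and MainOps built
# further down in this file; np.mean then collapses the per-batch values):
#
#   test_results = process_dataset(
#       test_iter,
#       {'purity': test_ops.purity, 'elbo': test_ops.elbo},
#       sess,
#       aggregation_ops=np.stack,
#       processing_ops=np.mean)
#   logging.info('Test purity: %.3f', test_results['purity'])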
def get_data_sources(dataset, dataset_kwargs, batch_size, test_batch_size,
training_data_type, n_concurrent_classes, image_key,
label_key):
"""Create and return data sources for training, validation, and testing.
Args:
dataset: str, name of dataset ('mnist', 'omniglot', etc).
dataset_kwargs: dict, kwargs used in tf dataset constructors.
batch_size: int, batch size used for training.
test_batch_size: int, batch size used for evaluation.
training_data_type: str, how training data is seen ('iid', or 'sequential').
n_concurrent_classes: int, # classes seen at a time (ignored for 'iid').
image_key: str, name if image key in dataset.
label_key: str, name of label key in dataset.
Returns:
A namedtuple containing all of the dataset iterators and batches.
"""
# Load training data sources
ds_train, ds_info = tfds.load(
name=dataset,
split=tfds.Split.TRAIN,
with_info=True,
as_dataset_kwargs={'shuffle_files': False},
**dataset_kwargs)
# Validate assumption that data is in [0, 255]
assert ds_info.features[image_key].dtype == tf.uint8
n_classes = ds_info.features[label_key].num_classes
num_train_examples = ds_info.splits['train'].num_examples
def preprocess_data(x):
"""Convert images from uint8 in [0, 255] to float in [0, 1]."""
x[image_key] = tf.image.convert_image_dtype(x[image_key], tf.float32)
return x
if training_data_type == 'sequential':
c = None # The index of the class number, None for now and updated later
if n_concurrent_classes == 1:
filter_fn = lambda v: tf.equal(v[label_key], c)
else:
# Define the lowest and highest class number at each data period.
assert n_classes % n_concurrent_classes == 0, (
'Number of total classes must be divisible by '
'number of concurrent classes')
cmin = []
cmax = []
for i in range(int(n_classes / n_concurrent_classes)):
for _ in range(n_concurrent_classes):
cmin.append(i * n_concurrent_classes)
cmax.append((i + 1) * n_concurrent_classes)
filter_fn = lambda v: tf.logical_and(
tf.greater_equal(v[label_key], cmin[c]), tf.less(
v[label_key], cmax[c]))
# Set up data sources/queues (one for each class).
train_datasets = []
train_iterators = []
train_data = []
full_ds = ds_train.repeat().shuffle(num_train_examples, seed=0)
full_ds = full_ds.map(preprocess_data)
for c in range(n_classes):
filtered_ds = full_ds.filter(filter_fn).batch(
batch_size, drop_remainder=True)
train_datasets.append(filtered_ds)
train_iterators.append(train_datasets[-1].make_one_shot_iterator())
train_data.append(train_iterators[-1].get_next())
else: # not sequential
full_ds = ds_train.repeat().shuffle(num_train_examples, seed=0)
full_ds = full_ds.map(preprocess_data)
train_datasets = full_ds.batch(batch_size, drop_remainder=True)
train_data = train_datasets.make_one_shot_iterator().get_next()
# Set up data source to get full training set for classifier training
full_ds = ds_train.repeat(1).shuffle(num_train_examples, seed=0)
full_ds = full_ds.map(preprocess_data)
train_datasets_for_classifier = full_ds.batch(
test_batch_size, drop_remainder=True)
train_iter_for_classifier = (
train_datasets_for_classifier.make_initializable_iterator())
train_data_for_classifier = train_iter_for_classifier.get_next()
# Load validation dataset.
try:
valid_dataset = tfds.load(
name=dataset, split=tfds.Split.VALIDATION, **dataset_kwargs)
num_valid_examples = ds_info.splits[tfds.Split.VALIDATION].num_examples
assert (num_valid_examples %
test_batch_size == 0), ('test_batch_size must be a divisor of %d' %
num_valid_examples)
valid_dataset = valid_dataset.repeat(1).batch(
test_batch_size, drop_remainder=True)
valid_dataset = valid_dataset.map(preprocess_data)
valid_iter = valid_dataset.make_initializable_iterator()
valid_data = valid_iter.get_next()
except (KeyError, ValueError):
logging.warning('No validation set!!')
valid_iter = None
valid_data = None
# Load test dataset.
test_dataset = tfds.load(
name=dataset, split=tfds.Split.TEST, **dataset_kwargs)
num_test_examples = ds_info.splits['test'].num_examples
assert (num_test_examples %
test_batch_size == 0), ('test_batch_size must be a divisor of %d' %
num_test_examples)
test_dataset = test_dataset.repeat(1).batch(
test_batch_size, drop_remainder=True)
test_dataset = test_dataset.map(preprocess_data)
test_iter = test_dataset.make_initializable_iterator()
test_data = test_iter.get_next()
logging.info('Loaded %s data', dataset)
return DatasetTuple(train_data, train_iter_for_classifier,
train_data_for_classifier, valid_iter, valid_data,
test_iter, test_data, ds_info)
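# Purely illustrative helper (not called anywhere): restates the cmin/cmax
# construction used above for the 'sequential' training regime with made-up
# numbers, e.g. n_classes=10, n_concurrent_classes=2 gives
# cmin = [0, 0, 2, 2, 4, 4, 6, 6, 8, 8] and cmax = [2, 2, 4, 4, 6, 6, 8, 8, 10, 10],
# i.e. each data period exposes one consecutive pair of classes.
def _concurrent_class_bounds(n_classes, n_concurrent_classes):
  cmin, cmax = [], []
  for i in range(n_classes // n_concurrent_classes):
    for _ in range(n_concurrent_classes):
      cmin.append(i * n_concurrent_classes)
      cmax.append((i + 1) * n_concurrent_classes)
  return cmin, cmax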
def setup_training_and_eval_graphs(x, label, y, n_y, curl_model,
classify_with_samples, is_training, name):
"""Set up the graph and return ops for training or evaluation.
Args:
x: tf placeholder for image.
label: tf placeholder for ground truth label.
y: tf placeholder for some self-supervised label/prediction.
n_y: int, dimensionality of discrete latent variable y.
curl_model: snt.AbstractModule representing the CURL model.
classify_with_samples: bool, whether to *sample* latents for classification.
is_training: bool, whether this graph is the training graph.
name: str, graph name.
Returns:
A namedtuple with the required graph ops to perform training or evaluation.
"""
# kl_y_supervised is -log q(y=y_true | x)
(log_p_x, kl_y, kl_z, log_p_x_supervised, kl_y_supervised,
kl_z_supervised) = curl_model.log_prob_elbo_components(x, y)
ll = log_p_x - kl_y - kl_z
elbo = -tf.reduce_mean(ll)
# Supervised loss, either for SMGR, or adaptation to supervised benchmark.
ll_supervised = log_p_x_supervised - kl_y_supervised - kl_z_supervised
elbo_supervised = -tf.reduce_mean(ll_supervised)
# Summaries
kl_y = tf.reduce_mean(kl_y)
kl_z = tf.reduce_mean(kl_z)
log_p_x_supervised = tf.reduce_mean(log_p_x_supervised)
kl_y_supervised = tf.reduce_mean(kl_y_supervised)
kl_z_supervised = tf.reduce_mean(kl_z_supervised)
# Evaluation.
hiddens = curl_model.get_shared_rep(x, is_training=is_training)
cat = curl_model.infer_cluster(hiddens)
cat_probs = cat.probs
confusion = tf.confusion_matrix(label, tf.argmax(cat_probs, axis=1),
num_classes=n_y, name=name + '_confusion')
purity = (tf.reduce_sum(tf.reduce_max(confusion, axis=0))
/ tf.reduce_sum(confusion))
if classify_with_samples:
latents = curl_model.infer_latent(
hiddens=hiddens, y=tf.to_float(cat.sample())).sample()
else:
latents = curl_model.infer_latent(
hiddens=hiddens, y=tf.to_float(cat.mode())).mean()
return MainOps(elbo, ll, log_p_x, kl_y, kl_z, elbo_supervised, ll_supervised,
log_p_x_supervised, kl_y_supervised, kl_z_supervised,
cat_probs, confusion, purity, latents)
def get_generated_data(sess, gen_op, y_input, gen_buffer_size,
component_counts):
"""Get generated model data (in place of saving a model snapshot).
Args:
sess: tf.Session.
gen_op: tf op representing a batch of generated data.
y_input: tf placeholder for which mixture components to generate from.
    gen_buffer_size: int, number of batches of data to generate.
component_counts: np.array, prior probabilities over components.
Returns:
A tuple of two numpy arrays
The generated data
The corresponding labels
"""
batch_size, n_y = y_input.shape.as_list()
# Sample based on the history of all components used.
cluster_sample_probs = component_counts.astype(float)
cluster_sample_probs = np.maximum(1e-12, cluster_sample_probs)
cluster_sample_probs = cluster_sample_probs / np.sum(cluster_sample_probs)
# Now generate the data based on the specified cluster prior.
gen_buffer_images = []
gen_buffer_labels = []
for _ in range(gen_buffer_size):
gen_label = np.random.choice(
np.arange(n_y),
size=(batch_size,),
replace=True,
p=cluster_sample_probs)
y_gen_posterior_vals = np.zeros((batch_size, n_y))
y_gen_posterior_vals[np.arange(batch_size), gen_label] = 1
gen_image = sess.run(gen_op, feed_dict={y_input: y_gen_posterior_vals})
gen_buffer_images.append(gen_image)
gen_buffer_labels.append(gen_label)
gen_buffer_images = np.vstack(gen_buffer_images)
gen_buffer_labels = np.concatenate(gen_buffer_labels)
return gen_buffer_images, gen_buffer_labels
def setup_dynamic_ops(n_y):
"""Set up ops to move / copy mixture component weights for dynamic expansion.
Args:
n_y: int, dimensionality of discrete latent variable y.
Returns:
A dict containing all of the ops required for dynamic updating.
"""
# Set up graph ops to dynamically modify component params.
graph = tf.get_default_graph()
# 1) Ops to get and set latent encoder params (entire tensors)
latent_enc_tensors = {}
for k in range(n_y):
latent_enc_tensors['latent_w_' + str(k)] = graph.get_tensor_by_name(
'latent_encoder/mlp_latent_encoder_{}/w:0'.format(k))
latent_enc_tensors['latent_b_' + str(k)] = graph.get_tensor_by_name(
'latent_encoder/mlp_latent_encoder_{}/b:0'.format(k))
latent_enc_assign_ops = {}
latent_enc_phs = {}
for key, tensor in latent_enc_tensors.items():
latent_enc_phs[key] = tfc.placeholder(tensor.dtype, tensor.shape)
latent_enc_assign_ops[key] = tf.assign(tensor, latent_enc_phs[key])
# 2) Ops to get and set cluster encoder params (columns of a tensor)
# We will be copying column ind_from to column ind_to.
cluster_w = graph.get_tensor_by_name(
'cluster_encoder/mlp_cluster_encoder_final/w:0')
cluster_b = graph.get_tensor_by_name(
'cluster_encoder/mlp_cluster_encoder_final/b:0')
ind_from = tfc.placeholder(dtype=tf.int32)
ind_to = tfc.placeholder(dtype=tf.int32)
# Determine indices of cluster encoder weights and biases to be updated
w_indices = tf.transpose(
tf.stack([
tf.range(cluster_w.shape[0], dtype=tf.int32),
ind_to * tf.ones(shape=(cluster_w.shape[0],), dtype=tf.int32)
]))
b_indices = ind_to
# Determine updates themselves
cluster_w_updates = tf.squeeze(
tf.slice(cluster_w, begin=(0, ind_from), size=(cluster_w.shape[0], 1)))
cluster_b_updates = cluster_b[ind_from]
# Create update ops
cluster_w_update_op = tf.scatter_nd_update(cluster_w, w_indices,
cluster_w_updates)
cluster_b_update_op = tf.scatter_update(cluster_b, b_indices,
cluster_b_updates)
# 3) Ops to get and set latent prior params (columns of a tensor)
# We will be copying column ind_from to column ind_to.
latent_prior_mu_w = graph.get_tensor_by_name(
'latent_decoder/latent_prior_mu/w:0')
latent_prior_sigma_w = graph.get_tensor_by_name(
'latent_decoder/latent_prior_sigma/w:0')
mu_indices = tf.transpose(
tf.stack([
ind_to * tf.ones(shape=(latent_prior_mu_w.shape[1],), dtype=tf.int32),
tf.range(latent_prior_mu_w.shape[1], dtype=tf.int32)
]))
mu_updates = tf.squeeze(
tf.slice(
latent_prior_mu_w,
begin=(ind_from, 0),
size=(1, latent_prior_mu_w.shape[1])))
mu_update_op = tf.scatter_nd_update(latent_prior_mu_w, mu_indices, mu_updates)
sigma_indices = tf.transpose(
tf.stack([
ind_to *
tf.ones(shape=(latent_prior_sigma_w.shape[1],), dtype=tf.int32),
tf.range(latent_prior_sigma_w.shape[1], dtype=tf.int32)
]))
sigma_updates = tf.squeeze(
tf.slice(
latent_prior_sigma_w,
begin=(ind_from, 0),
size=(1, latent_prior_sigma_w.shape[1])))
sigma_update_op = tf.scatter_nd_update(latent_prior_sigma_w, sigma_indices,
sigma_updates)
dynamic_ops = {
'ind_from_ph': ind_from,
'ind_to_ph': ind_to,
'latent_enc_tensors': latent_enc_tensors,
'latent_enc_assign_ops': latent_enc_assign_ops,
'latent_enc_phs': latent_enc_phs,
'cluster_w_update_op': cluster_w_update_op,
'cluster_b_update_op': cluster_b_update_op,
'mu_update_op': mu_update_op,
'sigma_update_op': sigma_update_op
}
return dynamic_ops
def copy_component_params(ind_from, ind_to, sess, ind_from_ph, ind_to_ph,
latent_enc_tensors, latent_enc_assign_ops,
latent_enc_phs,
cluster_w_update_op, cluster_b_update_op,
mu_update_op, sigma_update_op):
"""Copy parameters from component i to component j.
Args:
ind_from: int, component index to copy from.
ind_to: int, component index to copy to.
sess: tf.Session.
ind_from_ph: tf placeholder for component to copy from.
ind_to_ph: tf placeholder for component to copy to.
latent_enc_tensors: dict, tensors in the latent posterior encoder.
latent_enc_assign_ops: dict, assignment ops for latent posterior encoder.
latent_enc_phs: dict, placeholders for assignment ops.
cluster_w_update_op: op for updating weights of cluster encoder.
cluster_b_update_op: op for updating biased of cluster encoder.
mu_update_op: op for updating mu weights of latent prior.
sigma_update_op: op for updating sigma weights of latent prior.
"""
update_ops = []
feed_dict = {}
# Copy for latent encoder.
new_w_val, new_b_val = sess.run([
latent_enc_tensors['latent_w_' + str(ind_from)],
latent_enc_tensors['latent_b_' + str(ind_from)]
])
update_ops.extend([
latent_enc_assign_ops['latent_w_' + str(ind_to)],
latent_enc_assign_ops['latent_b_' + str(ind_to)]
])
feed_dict.update({
latent_enc_phs['latent_w_' + str(ind_to)]: new_w_val,
latent_enc_phs['latent_b_' + str(ind_to)]: new_b_val
})
# Copy for cluster encoder softmax.
update_ops.extend([cluster_w_update_op, cluster_b_update_op])
feed_dict.update({ind_from_ph: ind_from, ind_to_ph: ind_to})
# Copy for latent prior.
update_ops.extend([mu_update_op, sigma_update_op])
feed_dict.update({ind_from_ph: ind_from, ind_to_ph: ind_to})
sess.run(update_ops, feed_dict)
def run_training(
dataset,
training_data_type,
n_concurrent_classes,
blend_classes,
train_supervised,
n_steps,
random_seed,
lr_init,
lr_factor,
lr_schedule,
output_type,
n_y,
n_y_active,
n_z,
encoder_kwargs,
decoder_kwargs,
dynamic_expansion,
ll_thresh,
classify_with_samples,
report_interval,
knn_values,
gen_replay_type,
use_supervised_replay):
"""Run training script.
Args:
dataset: str, name of the dataset.
training_data_type: str, type of training run ('iid' or 'sequential').
n_concurrent_classes: int, # of classes seen at a time (ignored for 'iid').
blend_classes: bool, whether to blend in samples from the next class.
train_supervised: bool, whether to use supervision during training.
n_steps: int, number of total training steps.
random_seed: int, seed for tf and numpy RNG.
lr_init: float, initial learning rate.
lr_factor: float, learning rate decay factor.
lr_schedule: float, epochs at which the decay should be applied.
output_type: str, output distribution (currently only 'bernoulli').
n_y: int, maximum possible dimensionality of discrete latent variable y.
n_y_active: int, starting dimensionality of discrete latent variable y.
n_z: int, dimensionality of continuous latent variable z.
encoder_kwargs: dict, parameters to specify encoder.
decoder_kwargs: dict, parameters to specify decoder.
dynamic_expansion: bool, whether to perform dynamic expansion.
ll_thresh: float, log-likelihood threshold below which to keep poor samples.
classify_with_samples: bool, whether to sample latents when classifying.
report_interval: int, number of steps after which to evaluate and report.
knn_values: list of ints, k values for different k-NN classifiers to run
(values of 3, 5, and 10 were used in different parts of the paper).
gen_replay_type: str, 'fixed', 'dynamic', or None.
    use_supervised_replay: bool, whether to use supervised replay (aka 'SMGR').
"""
# Set tf random seed.
tfc.set_random_seed(random_seed)
np.set_printoptions(precision=2, suppress=True)
# First set up the data source(s) and get dataset info.
if dataset == 'mnist':
batch_size = 100
test_batch_size = 1000
dataset_kwargs = {}
image_key = 'image'
label_key = 'label'
elif dataset == 'omniglot':
batch_size = 15
test_batch_size = 1318
dataset_kwargs = {}
image_key = 'image'
label_key = 'alphabet'
else:
raise NotImplementedError
dataset_ops = get_data_sources(dataset, dataset_kwargs, batch_size,
test_batch_size, training_data_type,
n_concurrent_classes, image_key, label_key)
train_data = dataset_ops.train_data
train_data_for_clf = dataset_ops.train_data_for_clf
valid_data = dataset_ops.valid_data
test_data = dataset_ops.test_data
output_shape = dataset_ops.ds_info.features[image_key].shape
n_x = np.prod(output_shape)
n_classes = dataset_ops.ds_info.features[label_key].num_classes
num_train_examples = dataset_ops.ds_info.splits['train'].num_examples
# Check that the number of classes is compatible with the training scenario
assert n_classes % n_concurrent_classes == 0
assert n_steps % (n_classes / n_concurrent_classes) == 0
# Set specific params depending on the type of gen replay
if gen_replay_type == 'fixed':
    data_period = int(n_steps / (n_classes / n_concurrent_classes))
gen_every_n = 2 # Blend in a gen replay batch every 2 steps
gen_refresh_period = data_period # How often to refresh the batches of
# generated data (equivalent to snapshotting a generative model)
gen_refresh_on_expansion = False # Don't refresh on dyn expansion
elif gen_replay_type == 'dynamic':
gen_every_n = 2 # Blend in a gen replay batch every 2 steps
gen_refresh_period = 1e8 # Never refresh generated data periodically
gen_refresh_on_expansion = True # Refresh on dyn expansion instead
elif gen_replay_type is None:
gen_every_n = 0 # Don't use any gen replay batches
gen_refresh_period = 1e8 # Never refresh generated data periodically
gen_refresh_on_expansion = False # Don't refresh on dyn expansion
else:
raise NotImplementedError
max_gen_batches = 5000 # Max num of gen batches (proxy for storing a model)
# Set dynamic expansion parameters
exp_wait_steps = 100 # Steps to wait after expansion before eligible again
exp_burn_in = 100 # Steps to wait at start of learning before eligible
exp_buffer_size = 100 # Size of the buffer of poorly explained data
num_buffer_train_steps = 10 # Num steps to train component on buffer
# Define a global tf variable for the number of active components.
n_y_active_np = n_y_active
n_y_active = tfc.get_variable(
initializer=tf.constant(n_y_active_np, dtype=tf.int32),
trainable=False,
name='n_y_active',
dtype=tf.int32)
logging.info('Starting CURL script on %s data.', dataset)
# Set up placeholders for training.
x_train_raw = tfc.placeholder(
dtype=tf.float32, shape=(batch_size,) + output_shape)
label_train = tfc.placeholder(dtype=tf.int32, shape=(batch_size,))
def binarize_fn(x):
"""Binarize a Bernoulli by rounding the probabilities.
Args:
x: tf tensor, input image.
Returns:
A tf tensor with the binarized image
"""
return tf.cast(tf.greater(x, 0.5 * tf.ones_like(x)), tf.float32)
if dataset == 'mnist':
x_train = binarize_fn(x_train_raw)
x_valid = binarize_fn(valid_data[image_key]) if valid_data else None
x_test = binarize_fn(test_data[image_key])
x_train_for_clf = binarize_fn(train_data_for_clf[image_key])
elif 'cifar' in dataset or dataset == 'omniglot':
x_train = x_train_raw
x_valid = valid_data[image_key] if valid_data else None
x_test = test_data[image_key]
x_train_for_clf = train_data_for_clf[image_key]
else:
raise ValueError('Unknown dataset {}'.format(dataset))
label_valid = valid_data[label_key] if valid_data else None
label_test = test_data[label_key]
# Set up CURL modules.
shared_encoder = model.SharedEncoder(name='shared_encoder', **encoder_kwargs)
latent_encoder = functools.partial(model.latent_encoder_fn, n_y=n_y, n_z=n_z)
latent_encoder = snt.Module(latent_encoder, name='latent_encoder')
latent_decoder = functools.partial(model.latent_decoder_fn, n_z=n_z)
latent_decoder = snt.Module(latent_decoder, name='latent_decoder')
cluster_encoder = functools.partial(
model.cluster_encoder_fn, n_y_active=n_y_active, n_y=n_y)
cluster_encoder = snt.Module(cluster_encoder, name='cluster_encoder')
data_decoder = functools.partial(
model.data_decoder_fn,
output_type=output_type,
output_shape=output_shape,
n_x=n_x,
n_y=n_y,
**decoder_kwargs)
data_decoder = snt.Module(data_decoder, name='data_decoder')
# Uniform prior over y.
prior_train_probs = utils.construct_prior_probs(batch_size, n_y, n_y_active)
prior_train = snt.Module(
lambda: tfp.distributions.OneHotCategorical(probs=prior_train_probs),
name='prior_unconditional_train')
prior_test_probs = utils.construct_prior_probs(test_batch_size, n_y,
n_y_active)
prior_test = snt.Module(
lambda: tfp.distributions.OneHotCategorical(probs=prior_test_probs),
name='prior_unconditional_test')
model_train = model.Curl(
prior_train,
latent_decoder,
data_decoder,
shared_encoder,
cluster_encoder,
latent_encoder,
n_y_active,
is_training=True,
name='curl_train')
model_eval = model.Curl(
prior_test,
latent_decoder,
data_decoder,
shared_encoder,
cluster_encoder,
latent_encoder,
n_y_active,
is_training=False,
name='curl_test')
# Set up training graph
y_train = label_train if train_supervised else None
y_valid = label_valid if train_supervised else None
y_test = label_test if train_supervised else None
train_ops = setup_training_and_eval_graphs(
x_train,
label_train,
y_train,
n_y,
model_train,
classify_with_samples,
is_training=True,
name='train')
hiddens_for_clf = model_eval.get_shared_rep(x_train_for_clf,
is_training=False)
cat_for_clf = model_eval.infer_cluster(hiddens_for_clf)
if classify_with_samples:
latents_for_clf = model_eval.infer_latent(
hiddens=hiddens_for_clf, y=tf.to_float(cat_for_clf.sample())).sample()
else:
latents_for_clf = model_eval.infer_latent(
hiddens=hiddens_for_clf, y=tf.to_float(cat_for_clf.mode())).mean()
# Set up validation graph
if valid_data is not None:
valid_ops = setup_training_and_eval_graphs(
x_valid,
label_valid,
y_valid,
n_y,
model_eval,
classify_with_samples,
is_training=False,
name='valid')
# Set up test graph
test_ops = setup_training_and_eval_graphs(
x_test,
label_test,
y_test,
n_y,
model_eval,
classify_with_samples,
is_training=False,
name='test')
# Set up optimizer (with scheduler).
global_step = tf.train.get_or_create_global_step()
lr_schedule = [
tf.cast(el * num_train_examples / batch_size, tf.int64)
for el in lr_schedule
]
num_schedule_steps = tf.reduce_sum(
tf.cast(global_step >= lr_schedule, tf.float32))
lr = float(lr_init) * float(lr_factor)**num_schedule_steps
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = optimizer.minimize(train_ops.elbo)
train_step_supervised = optimizer.minimize(train_ops.elbo_supervised)
# For dynamic expansion, we want to train only new-component-related params
cat_params = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
'cluster_encoder/mlp_cluster_encoder_final')
component_params = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
'latent_encoder/mlp_latent_encoder_*')
prior_params = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
'latent_decoder/latent_prior*')
train_step_expansion = optimizer.minimize(
train_ops.elbo_supervised,
var_list=cat_params+component_params+prior_params)
# Set up ops for generative replay
if gen_every_n > 0:
# How many generative batches will we use each period?
gen_buffer_size = min(
int(gen_refresh_period / gen_every_n), max_gen_batches)
# Class each sample should be drawn from (default to uniform prior)
y_gen = tfp.distributions.OneHotCategorical(
probs=np.ones((batch_size, n_y)) / n_y,
dtype=tf.float32,
name='extra_train_classes').sample()
gen_samples = model_train.sample(y=y_gen, mean=True)
if dataset == 'mnist' or dataset == 'omniglot':
gen_samples = binarize_fn(gen_samples)
# Set up ops to dynamically modify parameters (for dynamic expansion)
dynamic_ops = setup_dynamic_ops(n_y)
logging.info('Created computation graph.')
n_steps_per_class = n_steps / n_classes # pylint: disable=invalid-name
cumulative_component_counts = np.array([0] * n_y).astype(float)
recent_component_counts = np.array([0] * n_y).astype(float)
gen_buffer_ind = 0
# Buffer of poorly explained data (if we're doing dynamic expansion).
poor_data_buffer = []
poor_data_labels = []
all_full_poor_data_buffers = []
all_full_poor_data_labels = []
has_expanded = False
steps_since_expansion = 0
eligible_for_expansion = False # Flag to ensure we wait a bit after expansion
# Set up basic ops to run and quantities to log.
ops_to_run = {
'train_ELBO': train_ops.elbo,
'train_log_p_x': train_ops.log_p_x,
'train_kl_y': train_ops.kl_y,
'train_kl_z': train_ops.kl_z,
'train_ll': train_ops.ll,
'train_batch_purity': train_ops.purity,
'train_probs': train_ops.cat_probs,
'n_y_active': n_y_active
}
if valid_data is not None:
valid_ops_to_run = {
'valid_ELBO': valid_ops.elbo,
'valid_kl_y': valid_ops.kl_y,
'valid_kl_z': valid_ops.kl_z,
'valid_confusion': valid_ops.confusion
}
else:
valid_ops_to_run = {}
test_ops_to_run = {
'test_ELBO': test_ops.elbo,
'test_kl_y': test_ops.kl_y,
'test_kl_z': test_ops.kl_z,
'test_confusion': test_ops.confusion
}
to_log = ['train_batch_purity']
to_log_eval = ['test_purity', 'test_ELBO', 'test_kl_y', 'test_kl_z']
if valid_data is not None:
to_log_eval += ['valid_ELBO', 'valid_purity']
if train_supervised:
# Track supervised losses, train on supervised loss.
ops_to_run.update({
'train_ELBO_supervised': train_ops.elbo_supervised,
'train_log_p_x_supervised': train_ops.log_p_x_supervised,
'train_kl_y_supervised': train_ops.kl_y_supervised,
'train_kl_z_supervised': train_ops.kl_z_supervised,
'train_ll_supervised': train_ops.ll_supervised
})
default_train_step = train_step_supervised
to_log += [
'train_ELBO_supervised', 'train_log_p_x_supervised',
'train_kl_y_supervised', 'train_kl_z_supervised'
]
else:
# Track unsupervised losses, train on unsupervised loss.
ops_to_run.update({
'train_ELBO': train_ops.elbo,
'train_kl_y': train_ops.kl_y,
'train_kl_z': train_ops.kl_z,
'train_ll': train_ops.ll
})
default_train_step = train_step
to_log += ['train_ELBO', 'train_kl_y', 'train_kl_z']
with tf.train.SingularMonitoredSession() as sess:
for step in range(n_steps):
feed_dict = {}
# Use the default training loss, but vary it each step depending on the
# training scenario (eg. for supervised gen replay, we alternate losses)
ops_to_run['train_step'] = default_train_step
### 1) PERIODICALLY TAKE SNAPSHOTS FOR GENERATIVE REPLAY ###
if (gen_refresh_period and step % gen_refresh_period == 0 and
gen_every_n > 0):
# First, increment cumulative count and reset recent probs count.
cumulative_component_counts += recent_component_counts
recent_component_counts = np.zeros(n_y)
# Generate enough samples for the rest of the next period
# (Functionally equivalent to storing and sampling from the model).
gen_buffer_images, gen_buffer_labels = get_generated_data(
sess=sess,
gen_op=gen_samples,
y_input=y_gen,
gen_buffer_size=gen_buffer_size,
component_counts=cumulative_component_counts)
### 2) DECIDE WHICH DATA SOURCE TO USE (GENERATIVE OR REAL DATA) ###
periodic_refresh_started = (
gen_refresh_period and step >= gen_refresh_period)
refresh_on_expansion_started = (gen_refresh_on_expansion and has_expanded)
if ((periodic_refresh_started or refresh_on_expansion_started) and
gen_every_n > 0 and step % gen_every_n == 1):
# Use generated data for the training batch
used_real_data = False
s = gen_buffer_ind * batch_size
e = (gen_buffer_ind + 1) * batch_size
gen_data_array = {
'image': gen_buffer_images[s:e],
'label': gen_buffer_labels[s:e]
}
gen_buffer_ind = (gen_buffer_ind + 1) % gen_buffer_size
# Feed it as x_train because it's already reshaped and binarized.
feed_dict.update({
x_train: gen_data_array['image'],
label_train: gen_data_array['label']
})
if use_supervised_replay:
# Convert label to one-hot before feeding in.
gen_label_onehot = np.eye(n_y)[gen_data_array['label']]
feed_dict.update({model_train.y_label: gen_label_onehot})
ops_to_run['train_step'] = train_step_supervised
else:
# Else use the standard training data sources.
used_real_data = True
# Select appropriate data source for iid or sequential setup.
if training_data_type == 'sequential':
current_data_period = int(
min(step / n_steps_per_class, len(train_data) - 1))
# If training supervised, set n_y_active directly based on how many
# classes have been seen
if train_supervised:
assert not dynamic_expansion
n_y_active_np = n_concurrent_classes * (
current_data_period // n_concurrent_classes +1)
n_y_active.load(n_y_active_np, sess)
train_data_array = sess.run(train_data[current_data_period])
# If we are blending classes, figure out where we are in the data
# period and add some fraction of other samples.
if blend_classes:
# If in the first quarter, blend in examples from the previous class
if (step % n_steps_per_class < n_steps_per_class / 4 and
current_data_period > 0):
other_train_data_array = sess.run(
train_data[current_data_period - 1])
num_other = int(
(n_steps_per_class / 2 - 2 *
(step % n_steps_per_class)) * batch_size / n_steps_per_class)
other_inds = np.random.permutation(batch_size)[:num_other]
train_data_array[image_key][:num_other] = other_train_data_array[
image_key][other_inds]
train_data_array[label_key][:num_other] = other_train_data_array[
label_key][other_inds]
# If in the last quarter, blend in examples from the next class
elif (step % n_steps_per_class > 3 * n_steps_per_class / 4 and
current_data_period < n_classes - 1):
other_train_data_array = sess.run(train_data[current_data_period +
1])
num_other = int(
(2 * (step % n_steps_per_class) - 3 * n_steps_per_class / 2) *
batch_size / n_steps_per_class)
other_inds = np.random.permutation(batch_size)[:num_other]
train_data_array[image_key][:num_other] = other_train_data_array[
image_key][other_inds]
              train_data_array[label_key][:num_other] = other_train_data_array[
label_key][other_inds]
# Otherwise, just use the current class
else:
train_data_array = sess.run(train_data)
feed_dict.update({
x_train_raw: train_data_array[image_key],
label_train: train_data_array[label_key]
})
### 3) PERFORM A GRADIENT STEP ###
results = sess.run(ops_to_run, feed_dict=feed_dict)
del results['train_step']
### 4) COMPUTE ADDITIONAL DIAGNOSTIC OPS ON VALIDATION/TEST SETS. ###
if (step+1) % report_interval == 0:
if valid_data is not None:
logging.info('Evaluating on validation and test set!')
proc_ops = {
k: (np.sum if 'confusion' in k
else np.mean) for k in valid_ops_to_run
}
results.update(
process_dataset(
dataset_ops.valid_iter,
valid_ops_to_run,
sess,
feed_dict=feed_dict,
processing_ops=proc_ops))
results['valid_purity'] = compute_purity(results['valid_confusion'])
else:
logging.info('Evaluating on test set!')
proc_ops = {
k: (np.sum if 'confusion' in k
else np.mean) for k in test_ops_to_run
}
results.update(process_dataset(dataset_ops.test_iter,
test_ops_to_run,
sess,
feed_dict=feed_dict,
processing_ops=proc_ops))
results['test_purity'] = compute_purity(results['test_confusion'])
curr_to_log = to_log + to_log_eval
else:
curr_to_log = list(to_log) # copy to prevent in-place modifications
### 5) DYNAMIC EXPANSION ###
if dynamic_expansion and used_real_data:
# If we're doing dynamic expansion and below max capacity then add
# poorly defined data points to a buffer.
# First check whether the model is eligible for expansion (the model
# becomes ineligible for a fixed time after each expansion, and when
# it has hit max capacity).
if (steps_since_expansion >= exp_wait_steps and step >= exp_burn_in and
n_y_active_np < n_y):
eligible_for_expansion = True
steps_since_expansion += 1
if eligible_for_expansion:
# Add poorly explained data samples to a buffer.
poor_inds = results['train_ll'] < ll_thresh
poor_data_buffer.extend(feed_dict[x_train_raw][poor_inds])
poor_data_labels.extend(feed_dict[label_train][poor_inds])
n_poor_data = len(poor_data_buffer)
# If buffer is big enough, then add a new component and train just the
# new component with several steps of gradient descent.
# (We just feed in a onehot cluster vector to indicate which
# component).
if n_poor_data >= exp_buffer_size:
# Dump the buffers so we can log them.
all_full_poor_data_buffers.append(poor_data_buffer)
all_full_poor_data_labels.append(poor_data_labels)
# Take a new generative snapshot if specified.
if gen_refresh_on_expansion and gen_every_n > 0:
# Increment cumulative count and reset recent probs count.
cumulative_component_counts += recent_component_counts
recent_component_counts = np.zeros(n_y)
gen_buffer_images, gen_buffer_labels = get_generated_data(
sess=sess,
gen_op=gen_samples,
y_input=y_gen,
gen_buffer_size=gen_buffer_size,
component_counts=cumulative_component_counts)
# Cull to a multiple of batch_size (keep the later data samples).
n_poor_batches = int(n_poor_data / batch_size)
poor_data_buffer = poor_data_buffer[-(n_poor_batches * batch_size):]
poor_data_labels = poor_data_labels[-(n_poor_batches * batch_size):]
# Find most probable component (on poor batch).
poor_cprobs = []
for bs in range(n_poor_batches):
poor_cprobs.append(
sess.run(
train_ops.cat_probs,
feed_dict={
x_train_raw:
poor_data_buffer[bs * batch_size:(bs + 1) *
batch_size]
}))
best_cluster = np.argmax(np.sum(np.vstack(poor_cprobs), axis=0))
# Initialize parameters of the new component from most prob
# existing.
new_cluster = n_y_active_np
copy_component_params(best_cluster, new_cluster, sess,
**dynamic_ops)
# Increment mixture component count n_y_active.
n_y_active_np += 1
n_y_active.load(n_y_active_np, sess)
# Perform a number of steps of gradient descent on the data buffer,
# training only the new component (supervised loss).
for _ in range(num_buffer_train_steps):
for bs in range(n_poor_batches):
x_batch = poor_data_buffer[bs * batch_size:(bs + 1) *
batch_size]
label_batch = [new_cluster] * batch_size
label_onehot_batch = np.eye(n_y)[label_batch]
_ = sess.run(
train_step_expansion,
feed_dict={
x_train_raw: x_batch,
model_train.y_label: label_onehot_batch
})
# Empty the buffer.
poor_data_buffer = []
poor_data_labels = []
# Reset the threshold flag so we have a burn in before the next
# component.
eligible_for_expansion = False
has_expanded = True
steps_since_expansion = 0
# Accumulate counts.
if used_real_data:
train_cat_probs_vals = results['train_probs']
recent_component_counts += np.sum(
train_cat_probs_vals, axis=0).astype(float)
### 6) LOGGING AND EVALUATION ###
cleanup_for_print = lambda x: ', {}: %.{}f'.format(
x.capitalize().replace('_', ' '), 3)
log_str = 'Iteration %d'
log_str += ''.join([cleanup_for_print(el) for el in curr_to_log])
log_str += ' n_active: %d'
logging.info(
log_str,
*([step] + [results[el] for el in curr_to_log] + [n_y_active_np]))
# Periodically perform evaluation
if (step + 1) % report_interval == 0:
# Report test purity and related measures
logging.info(
'Iteration %d, Test purity: %.3f, Test ELBO: %.3f, Test '
'KLy: %.3f, Test KLz: %.3f', step, results['test_purity'],
results['test_ELBO'], results['test_kl_y'], results['test_kl_z'])
# Flush data only once in a while to allow buffering of data for more
# efficient writes.
results['all_full_poor_data_buffers'] = all_full_poor_data_buffers
results['all_full_poor_data_labels'] = all_full_poor_data_labels
logging.info('Also training a classifier in latent space')
# Perform knn classification from latents, to evaluate discriminability.
# Get and encode training and test datasets.
clf_train_vals = process_dataset(
dataset_ops.train_iter_for_clf, {
'latents': latents_for_clf,
'labels': train_data_for_clf[label_key]
},
sess,
feed_dict,
aggregation_ops=np.concatenate)
clf_test_vals = process_dataset(
dataset_ops.test_iter, {
'latents': test_ops.latents,
'labels': test_data[label_key]
},
sess,
aggregation_ops=np.concatenate)
# Perform knn classification.
knn_models = []
for nval in knn_values:
# Fit training dataset.
clf = neighbors.KNeighborsClassifier(n_neighbors=nval)
clf.fit(clf_train_vals['latents'], clf_train_vals['labels'])
knn_models.append(clf)
results['train_' + str(nval) + 'nn_acc'] = clf.score(
clf_train_vals['latents'], clf_train_vals['labels'])
# Get test performance.
results['test_' + str(nval) + 'nn_acc'] = clf.score(
clf_test_vals['latents'], clf_test_vals['labels'])
logging.info(
'Iteration %d %d-NN classifier accuracies, Training: '
'%.3f, Test: %.3f', step, nval,
results['train_' + str(nval) + 'nn_acc'],
results['test_' + str(nval) + 'nn_acc'])
| apache-2.0 |
sachinpro/sachinpro.github.io | tensorflow/contrib/learn/python/learn/tests/dataframe/test_dataframe.py | 1 | 4503 | # pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedColumn(
[mocks.MockColumn("foobar", [])],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedColumn(
[mocks.MockColumn("foobar", [])],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedColumn(
[mocks.MockColumn("foobar", [])],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(tf.test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select(self):
df = setup_test_df()
df2 = df.select(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual("Fake Tensor 2", c1.build())
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockColumn("QuackColumn", [])
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockColumn("QuackColumn", [])
col2 = mocks.MockColumn("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {"a": "Fake Tensor 1",
"b": "Fake Tensor 2",
"c": "Fake Tensor 1"}
self.assertEqual(expected, result)
def test_to_input_fn_all_features(self):
df = setup_test_df()
input_fn = df.to_input_fn()
f, t = input_fn()
expected_f = {"a": "Fake Tensor 1",
"b": "Fake Tensor 2",
"c": "Fake Tensor 1"}
self.assertEqual(expected_f, f)
expected_t = {}
self.assertEqual(expected_t, t)
def test_to_input_fn_features_only(self):
df = setup_test_df()
input_fn = df.to_input_fn(["b", "c"])
f, t = input_fn()
expected_f = {"b": "Fake Tensor 2", "c": "Fake Tensor 1"}
self.assertEqual(expected_f, f)
expected_t = {}
self.assertEqual(expected_t, t)
def test_to_input_fn_targets_only(self):
df = setup_test_df()
input_fn = df.to_input_fn(target_keys=["b", "c"])
f, t = input_fn()
expected_f = {"a": "Fake Tensor 1"}
self.assertEqual(expected_f, f)
expected_t = {"b": "Fake Tensor 2", "c": "Fake Tensor 1"}
self.assertEqual(expected_t, t)
def test_to_input_fn_both(self):
df = setup_test_df()
input_fn = df.to_input_fn(feature_keys=["a"], target_keys=["b"])
f, t = input_fn()
expected_f = {"a": "Fake Tensor 1"}
self.assertEqual(expected_f, f)
expected_t = {"b": "Fake Tensor 2"}
self.assertEqual(expected_t, t)
def test_to_input_fn_not_disjoint(self):
df = setup_test_df()
def get_not_disjoint():
df.to_input_fn(feature_keys=["a", "b"], target_keys=["b"])
self.assertRaises(ValueError, get_not_disjoint)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
tedunderwood/biographies | topicmodel/interpret/measure_variation.py | 1 | 2502 | ## measure_variation.py
# This script is designed to automate the process of condensing multiple models
# and testing them against pre-registered hypotheses.
# We start with a modelname (arg1). We expect to find 12 doctopics files
# associated with that model. Each of those files will be
# condensed into a roletheme file and stored in a folder associated
# with the modelname. In doing this, we will also need to know
# the number of themes (as opposed to roles) associated with this model.
# Then we run the smart-evaluate and regular evaluation processes on all
# of the roletheme files.
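# Example invocation (hypothetical model name and theme count):
#   python measure_variation.py allthemes_k100 60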
import sys, csv, os
import numpy as np
import pandas as pd
import autocondense_rolethemes as autocondense
import autoevaluate_hypotheses as evaluate
import autosmartevaluate_rolethemes as smartevaluate
args = sys.argv
modelname = args[1]
themecount = int(args[2])
inroot = '/projects/ischoolichass/ichass/usesofscale/code/roles/'
outroot = '../' + modelname + '_mcmc/'
if not os.path.isdir(outroot):
os.mkdir(outroot)
outfields = ['model', 'iteration', 'basetotal', 'baseself', 'basesocial',
'basestructural', 'smarttotal', 'smartself', 'smartsocial', 'smartstructural']
if not os.path.isfile('variations.tsv'):
with open('variations.tsv', mode = 'w', encoding = 'utf-8') as f:
f.write('\t'.join(outfields) + '\n')
# CONDENSE DOCTOPIC FILES
for i in range(12):
inpath = inroot + modelname + '_mcmc' + str(i) + '_doctopics.tsv'
outpath = outroot + modelname + '_mcmc' + str(i) + '_rolethemes.tsv'
if os.path.isfile(outpath):
continue
elif not os.path.isfile(inpath):
break
else:
print("Condense: " + str(i))
autocondense.condense_a_file(inpath, outpath, themecount)
ceiling = i + 1
# That's the number of files we found
rows = dict()
for i in range(ceiling):
inpath = outroot + modelname + '_mcmc' + str(i) + '_rolethemes.tsv'
rows[i] = dict()
rows[i]['basetotal'], rows[i]['baseself'], rows[i]['basesocial'], rows[i]['basestructural'] = evaluate.evaluate_a_model(inpath)
print()
for i in range(ceiling):
inpath = outroot + modelname + '_mcmc' + str(i) + '_rolethemes.tsv'
rows[i]['smarttotal'], rows[i]['smartself'], rows[i]['smartsocial'], rows[i]['smartstructural'] = smartevaluate.smarteval_a_model(inpath, themecount)
with open('variations.tsv', mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, fieldnames = outfields, delimiter = '\t')
for i in range(ceiling):
rows[i]['iteration'] = i
rows[i]['model'] = modelname
scribe.writerow(rows[i])
| mit |
AlertaDengue/InfoDenguePredict | infodenguepredict/models/cross_prediction_RQF.py | 1 | 3663 | """
This scripts implements cross disease predicitons using RQF model trained on dengue
"""
from infodenguepredict.models.quantile_forest import build_model, build_lagged_features, calculate_metrics
from infodenguepredict.data.infodengue import get_cluster_data, get_city_names
from infodenguepredict.predict_settings import *
import joblib
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from skgarden import RandomForestQuantileRegressor
def plot_prediction(pred, pred25, pred975, ydata, horizon, title, path='quantile_forest', save=True, doenca='chik'):
plt.clf()
plt.plot(ydata, 'k-', label='data')
x = ydata.index.shift(horizon, freq='W')
plt.plot(x, pred, 'r-', alpha=0.5, label='median prediction')
# plt.plot(x, y25, 'b-', alpha=0.3)
# plt.plot(x, y975, 'b-', alpha=0.3)
plt.fill_between(x, pred25, pred975, color='b', alpha=0.3)
plt.grid()
plt.ylabel('Weekly cases')
plt.title('{} cross-predictions for {}'.format(doenca, title))
plt.xticks(rotation=70)
plt.legend(loc=0)
if save:
if not os.path.exists('saved_models/' + path + '/' + STATE):
os.mkdir('saved_models/' + path + '/' + STATE)
plt.savefig('saved_models/{}/{}/qf_{}_cross_{}_.png'.format(path, STATE, doenca, title), dpi=300)
plt.show()
return None
def qf_prediction(city, state, horizon, lookback, doenca='chik'):
with open('../analysis/clusters_{}.pkl'.format(state), 'rb') as fp:
clusters = pickle.load(fp)
data, group = get_cluster_data(city, clusters=clusters, data_types=DATA_TYPES, cols=PREDICTORS, doenca=doenca)
target = 'casos_est_{}'.format(city)
casos_est_columns = ['casos_est_{}'.format(i) for i in group]
# casos_columns = ['casos_{}'.format(i) for i in group]
# data = data_full.drop(casos_columns, axis=1)
data_lag = build_lagged_features(data, lookback)
    data_lag = data_lag.dropna()
data_lag = data_lag['2016-01-01':]
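    # build_lagged_features is assumed to append `lookback` shifted copies of each column,
    # so every row of data_lag carries the recent case history that the quantile forest
    # conditions on; rows before 2016-01-01 are discarded above.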
targets = {}
for d in range(1, horizon + 1):
if d == 1:
targets[d] = data_lag[target].shift(-(d - 1))
else:
targets[d] = data_lag[target].shift(-(d - 1))[:-(d - 1)]
X_data = data_lag.drop(casos_est_columns, axis=1)
city_name = get_city_names([city, 0])[0][1]
# Load dengue model
model = joblib.load('saved_models/quantile_forest/{}/{}_city_model_{}W.joblib'.format(state, city, horizon))
pred25 = model.predict(X_data, quantile=2.5)
pred = model.predict(X_data, quantile=50)
pred975 = model.predict(X_data, quantile=97.5)
# metrics.to_pickle('{}/{}/qf_metrics_{}.pkl'.format('saved_models/quantile_forest', state, city))
plot_prediction(pred, pred25, pred975, targets[1], horizon, city_name, save=True, doenca=doenca)
return model, pred, pred25, pred975, X_data, targets, data_lag
if __name__ == "__main__":
doença = 'chik'
STATE = 'RJ'
if STATE == 'RJ':
cities = [3304557, 3303500, 3301009, 3304904]
elif STATE == 'CE':
cities = [2304400, 2307650]
for CITY in cities:
model, preds, preds25, preds975, X_data, targets, data_lag = qf_prediction(CITY, STATE,
horizon=PREDICTION_WINDOW,
lookback=LOOK_BACK, doenca=doença)
# Save cross-predictions
with open(f'saved_models/quantile_forest/{STATE}/{CITY}_cross_{doença}_preditions.pkl','wb') as f:
pickle.dump({'xdata': X_data, 'target': targets, 'pred': preds, 'ub': preds975, 'lb': preds25}, f)
| gpl-3.0 |
etkirsch/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 244 | 6011 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, speed up of LSHForest queries compared to brute force
method in exact nearest neighbors is measures for the
aforementioned settings. In general, speed up is increasing as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
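# For instance, make_data(int(1e4), 100, 100) returns a (10000, 100) index array and a
# (100, 100) query array, both drawn from the same mixture of 100 Gaussian blobs.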
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
kevin-intel/scikit-learn | sklearn/feature_selection/tests/test_base.py | 15 | 3668 | import numpy as np
import pytest
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection._base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, accept_sparse='csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
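# With the default step=2, StepSelector keeps columns 0, 2, 4, ... of its input; the
# fixtures below (support, support_inds, Xt, feature_names_t) encode exactly that pattern.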
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert np.int32 == sel.transform(X.astype(np.int32)).dtype
assert np.float32 == sel.transform(X.astype(np.float32)).dtype
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
with pytest.raises(ValueError):
sel.transform(np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert np.int32 == sel.transform(sparse(X).astype(np.int32)).dtype
assert np.float32 == sel.transform(sparse(X).astype(np.float32)).dtype
# Check wrong shape raises error
with pytest.raises(ValueError):
sel.transform(np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert (np.int32 ==
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert (np.float32 ==
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
with pytest.raises(ValueError):
sel.inverse_transform(np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert (np.int32 ==
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert (np.float32 ==
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
with pytest.raises(ValueError):
sel.inverse_transform(np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
mattHawthorn/sk-torch | sktorch/interface.py | 1 | 40749 | #coding:utf-8
from typing import Any, Tuple, List, Iterable, Callable, Union, IO, Optional as Opt
import pickle
from time import time
from numpy import ndarray
from torch import autograd, nn, optim, from_numpy, stack
from torch.nn.modules import loss
from torch.nn.utils import clip_grad_norm
from .util import cuda_available, peek, pretty_time
from .util import get_torch_object_bytes, load_torch_object_bytes, open_file
from .stopping import max_generalization_loss#, tail_losses_n_consecutive_increases, tail_losses_no_relative_improvement
from .data import efficient_batch_iterator, TupleIteratorDataLoader
from .data import T1, T2, TensorType, FloatTensorType, FloatTensorTypes
DEFAULT_BATCH_SIZE = 32
DEFAULT_STOPPING_CRITERION = max_generalization_loss(0.05)
def training_mode(mode: bool):
"""decorator factory to make a decorator to set the training mode of an NN with a pytorch backend"""
def dec(nn_method):
def method(obj: 'TorchModel', *args, **kwargs):
obj.set_mode(mode)
result = nn_method(obj, *args, **kwargs)
obj.set_mode(False)
return result
return method
return dec
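# Applied below as @training_mode(True) on the fit/update methods and @training_mode(False)
# on the evaluation/prediction methods; note the wrapped call always leaves the underlying
# module in eval mode afterwards.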
class TorchModel:
"""Wrapper class to handle encoding inputs to pytorch variables, managing transfer to/from the GPU,
handling train/eval mode, etc."""
def __init__(self, torch_module: nn.Module, loss_func: Union[loss._Loss, type, str],
optimizer: Union[str, optim.Optimizer],
loss_func_kwargs: Opt[dict]=None,
optimizer_kwargs: Opt[dict]=None,
input_encoder: Opt[Callable[[T1], TensorType]]=None,
target_encoder: Opt[Callable[[T2], TensorType]]=None,
output_decoder: Opt[Callable[[TensorType], T2]]=None,
is_classifier: bool=False,
estimate_normalization_samples: Opt[int]=None,
default_batch_size: int=DEFAULT_BATCH_SIZE,
stopping_criterion: Callable[[List[float], Opt[List[float]]], Union[bool, Tuple[bool, Opt[str]]]]=
DEFAULT_STOPPING_CRITERION,
print_func: Callable[[Any], None]=print,
num_dataloader_workers: int=-2):
"""
:param torch_module: a torch.nn.Module
:param loss_func: a torch.nn.modules.loss._Loss callable
:param optimizer: a torch.optim.Optimizer
:param input_encoder: a callable taking the type of the training data independent variable and encoding it to
tensors for the forward pass in the torch module
:param target_encoder: a callable taking the type of the training data dependent variable and encoding it to
tensors or numerics for the forward pass in the torch module
:param output_decoder: a callable taking a (single instance, not batch) torch tensor output of the torch module
forward pass, and returning the type of the training data dependent variable
:param estimate_normalization_samples: If normalization of inputs is called for, use this many samples of
training data to estimate the mean and sd per input dimension
:param is_classifier: boolean specifying that the target is a single class. This is required to make sure that
dependent variable batches are collated in the way that torch loss functions expect (1-dimensional)
:param print_func: callable with no return value, ideally prints to screen or log file
:param stopping_criterion: callable taking a list of epoch losses and optional validation losses and returning
either a bool or (bool, str or None). The return bool should indicate whether to stop training.
The optional return string is a message to be printed at the time that training is stopped.
:param num_dataloader_workers: int specifying how many threads should be used for data loading. 0 indicates that
all data loading is done in the main thread (same semantics as torch.utils.data.Dataloader). A negative
value indicates (available cpu's + num_dataloader_workers + 1) - same semantics as often used in sklearn.
e.g., -1 indicates as many workers as cpu's, -2 indicates 1 fewer than the number of cpu's, etc.
"""
self.gpu_enabled = cuda_available()
self._torch_module = None
self._optimizer = None
self.is_classifier = is_classifier
# property setter method ensures this goes to the gpu if it's available
self.torch_module = torch_module
# property setter method gets the torch.optim class if this is a string, checks inheritance, passes
# module params and optimizer_kwargs to constructor
self.optimizer_kwargs = optimizer_kwargs
self.optimizer = optimizer
self.loss_func_kwargs = loss_func_kwargs
self.loss_func = loss_func
# you could pass in a logger.info/debug or a file.write method for this if you like
self.print = print_func
self.stopping_criterion = stopping_criterion
self.default_batch_size=default_batch_size
self.norm_n_samples = estimate_normalization_samples
self._input_mean = None
self._input_sd = None
self._norm_estimated = False
self.encode_input = input_encoder
self.encode_target = target_encoder
if output_decoder is not None:
self.decode_output = output_decoder
# these take tensors and wrap them in Variables and move them to the GPU if necessary
self.prepare_input = self.get_input_preparer()
self.prepare_target = self.get_target_preparer()
self.num_dataloader_workers = num_dataloader_workers
@property
def should_normalize(self):
return self.norm_n_samples is not None
def estimate_normalization(self, sample: Union[FloatTensorType, ndarray]):
"""Estimate the input normalization parameters (mean and sd) per input dimension and store them for input
normalization during fitting and prediction"""
if not self.should_normalize:
raise ValueError("This model does not require normalization of inputs; inputs may be class labels or "
"pre-normalized")
mean = sample.mean(0)
sd = sample.std(0)
self._input_mean = mean.cuda() if self.gpu_enabled else mean.cpu()
self._input_sd = sd.cuda() if self.gpu_enabled else sd.cpu()
self._norm_estimated = True
def normalize(self, X: Union[FloatTensorType, autograd.Variable]):
if not self._norm_estimated:
raise ValueError("normalization constants have not yet been estimated")
normed = (X - self._input_mean.expand_as(X))
# can do this operation in place
normed /= self._input_sd.expand_as(X)
return normed
@property
def input_mean(self):
# no setting allowed for this - don't want to mess it up!
return self._input_mean
@property
def input_sd(self):
# no setting allowed for this - don't want to mess it up!
return self._input_sd
def get_input_preparer(self) -> Callable[[TensorType], autograd.Variable]:
if self.should_normalize:
if self.gpu_enabled:
def prepare(data: TensorType) -> autograd.Variable:
return autograd.Variable(self.normalize(data.cuda()), volatile=not self._torch_module.training)
else:
def prepare(data: TensorType) -> autograd.Variable:
return autograd.Variable(self.normalize(data.cpu()), volatile=not self._torch_module.training)
else:
if self.gpu_enabled:
def prepare(data: TensorType) -> autograd.Variable:
return autograd.Variable(data.cuda(), volatile=not self._torch_module.training)
else:
def prepare(data: TensorType) -> autograd.Variable:
return autograd.Variable(data.cpu(), volatile=not self._torch_module.training)
return prepare
def get_target_preparer(self) -> Callable[[TensorType], autograd.Variable]:
if self.gpu_enabled:
def prepare(data: TensorType) -> autograd.Variable:
return autograd.Variable(data.cuda(), requires_grad=False, volatile=not self._torch_module.training)
else:
def prepare(data: TensorType) -> autograd.Variable:
return autograd.Variable(data.cpu(), requires_grad=False, volatile=not self._torch_module.training)
return prepare
@property
def torch_module(self):
return self._torch_module
@torch_module.setter
def torch_module(self, module: nn.Module):
self._torch_module = module.cuda() if self.gpu_enabled else module.cpu()
@property
def parameters(self):
return list(self.torch_module.parameters())
@property
def optimizer(self):
if self.optimizer_kwargs:
return self._optimizer(self.torch_module.parameters(), **self.optimizer_kwargs)
else:
return self._optimizer(self.torch_module.parameters())
@optimizer.setter
def optimizer(self, optimizer: Union[str, type]):
if isinstance(optimizer, str):
optimizer = getattr(optim, optimizer)
if not issubclass(optimizer, optim.Optimizer):
raise TypeError("`optimizer` must be a torch.optim.Optim or a string which refers to one by name")
self._optimizer = optimizer
@property
def loss_func(self):
return self._loss_func
@loss_func.setter
def loss_func(self, loss_func):
if isinstance(loss_func, str):
loss_func = getattr(loss, loss_func)
if isinstance(loss_func, nn.Module):
self._loss_func = loss_func
self.loss_func_kwargs = None
else:
try:
if issubclass(loss_func, nn.Module):
self._loss_func = loss_func(**self.loss_func_kwargs) if self.loss_func_kwargs else loss_func()
except:
raise TypeError("`loss_func` must be a custom loss nn.Module, a torch.nn.loss._Loss class or instance, "
"or a string which refers to one by name")
def set_mode(self, training: bool):
if self.torch_module.training != training:
self.torch_module.train(training)
def _single_batch_train_pass(self, X_batch: TensorType, y_batch: TensorType, optimizer: optim.Optimizer):
module = self.torch_module
module.zero_grad()
optimizer.zero_grad()
err = self._single_batch_test_pass(X_batch, y_batch)
err.backward()
optimizer.step()
return err
def _single_batch_test_pass(self, X_batch: TensorType, y_batch: TensorType):
y_batch = self.prepare_target(y_batch)
output = self._single_batch_forward_pass(X_batch)
err = self.loss_func(output, y_batch)
return err
def _single_batch_forward_pass(self, X_batch: TensorType):
X_batch = self.prepare_input(X_batch)
output = self.torch_module(X_batch)
return output
@training_mode(True)
def fit(self, X: Iterable[T1], y: Iterable[T2],
X_test: Opt[Iterable[T1]]=None, y_test: Opt[Iterable[T2]]=None,
batch_size: Opt[int]=None, shuffle: bool=False,
max_epochs: int=1, min_epochs: int=1, criterion_window: int=5,
max_training_time: Opt[float]=None,
batch_report_interval: Opt[int]=None, epoch_report_interval: Opt[int]=None):
"""This method fits the *entire* pipeline, including input normalization. Initialization of weight/bias
parameters in the torch_module is up to you; there is no obvious canonical way to do it here.
Returns per-epoch losses and validation losses (if any)."""
batch_size = batch_size or self.default_batch_size
if self.should_normalize:
sample, X = peek(X, self.norm_n_samples)
if self.encode_input:
sample = [self.encode_input(x) for x in sample]
sample = stack(sample)
self.estimate_normalization(sample)
return self.update(X=X, y=y, X_test=X_test, y_test=y_test, batch_size=batch_size, shuffle=shuffle,
max_epochs=max_epochs, min_epochs=min_epochs,
criterion_window=criterion_window,
max_training_time=max_training_time,
batch_report_interval=batch_report_interval, epoch_report_interval=epoch_report_interval)
@training_mode(True)
def update(self, X: Iterable[T1], y: Iterable[T2],
X_test: Opt[Iterable[T1]]=None, y_test: Opt[Iterable[T2]]=None,
batch_size: Opt[int] = None, shuffle: bool=False,
max_epochs: int = 1, min_epochs: int = 1, criterion_window: int = 5,
max_training_time: Opt[float] = None,
batch_report_interval: Opt[int]=None, epoch_report_interval: Opt[int]=None):
"""Update model parameters in light of new data X and y.
Returns per-epoch losses and validation losses (if any).
This method handles packaging X and y into a batch iterator of the kind that torch modules expect."""
assert max_epochs > 0
batch_size = batch_size or self.default_batch_size
data_kw = dict(X_encoder=self.encode_input, y_encoder=self.encode_target,
batch_size=batch_size, shuffle=shuffle,
num_workers=self.num_dataloader_workers, classifier=self.is_classifier)
dataset = efficient_batch_iterator(X, y, **data_kw)
if X_test is not None and y_test is not None:
test_data = efficient_batch_iterator(X_test, y_test, **data_kw)
else:
if X_test is not None or y_test is not None:
self.print("Warning: test data was provided but either the regressors or the response were omitted")
test_data = None
return self._update(dataset, test_data, max_epochs=max_epochs, min_epochs=min_epochs,
criterion_window=criterion_window,
max_training_time=max_training_time,
batch_report_interval=batch_report_interval, epoch_report_interval=epoch_report_interval)
@training_mode(True)
def fit_zipped(self, dataset: Iterable[Tuple[T1, T2]], test_dataset: Opt[Iterable[Tuple[T1, T2]]]=None,
batch_size: Opt[int] = None,
max_epochs: int = 1, min_epochs: int = 1, criterion_window: int = 5,
max_training_time: Opt[float] = None,
batch_report_interval: Opt[int] = None, epoch_report_interval: Opt[int] = None):
"""For fitting to an iterable sequence of pairs, such as may arise in very large streaming datasets from sources
that don't fit the random access and known-length requirements of a torch.data.Dataset (e.g. a sequence of
        sentences split from a set of text files, as might arise in NLP applications).
Like TorchModel.fit(), this estimates input normalization before the weight update, and weight initialization of
the torch_module is up to you. Returns per-epoch losses and validation losses (if any).
This method handles packaging X and y into a batch iterator of the kind that torch modules expect."""
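        # Usage sketch (hypothetical streaming source, not part of this module):
        #   model.fit_zipped(((tokens, label) for tokens, label in read_corpus(path)),
        #                    batch_size=64, max_epochs=10)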
batch_size = batch_size or self.default_batch_size
if self.should_normalize:
sample, dataset = peek(dataset, self.norm_n_samples)
sample = [t[0] for t in sample]
if self.encode_input:
sample = [self.encode_input(x) for x in sample]
sample = stack(sample)
self.estimate_normalization(sample)
return self.update_zipped(dataset=dataset, test_dataset=test_dataset, batch_size=batch_size,
max_epochs=max_epochs, min_epochs=min_epochs,
criterion_window=criterion_window,
max_training_time=max_training_time,
batch_report_interval=batch_report_interval, epoch_report_interval=epoch_report_interval)
@training_mode(True)
def update_zipped(self, dataset: Iterable[Tuple[T1, T2]], test_dataset: Opt[Iterable[Tuple[T1, T2]]]=None,
batch_size: Opt[int] = None,
max_epochs: int = 1, min_epochs: int = 1, criterion_window: int = 5,
max_training_time: Opt[float] = None,
batch_report_interval: Opt[int] = None, epoch_report_interval: Opt[int] = None):
"""For updating model parameters in light of an iterable sequence of (x,y) pairs, such as may arise in very
large streaming datasets from sources that don't fit the random access and known-length requirements of a
torch.data.Dataset (e.g. a sequence of sentences split from a set of text files as might arise in NLP
        applications). Returns per-epoch losses and validation losses (if any)."""
batch_size = batch_size or self.default_batch_size
data_kw = dict(batch_size=batch_size, classifier=self.is_classifier,
X_encoder=self.encode_input,
y_encoder=self.encode_target)
dataset = TupleIteratorDataLoader(dataset, **data_kw)
if test_dataset is not None:
test_dataset = TupleIteratorDataLoader(test_dataset, **data_kw)
return self._update(dataset, test_dataset, max_epochs=max_epochs, min_epochs=min_epochs,
criterion_window=criterion_window,
max_training_time=max_training_time,
batch_report_interval=batch_report_interval, epoch_report_interval=epoch_report_interval)
@training_mode(True)
def fit_batched(self, batches: Iterable[Tuple[TensorType, TensorType]],
test_batches: Opt[Iterable[Tuple[TensorType, TensorType]]]=None,
max_epochs: int = 1, min_epochs: int = 1,
criterion_window: int = 5,
max_training_time: Opt[float] = None,
batch_report_interval: Opt[int] = None, epoch_report_interval: Opt[int] = None):
"""For fitting to an iterable of batch tensor pairs, such as would come from a torch.util.data.DataLoader.
Variables are therefore assumed to be already appropriately encoded, and none of the provided encoders is used.
The test set is also assumed to be in this form. Like TorchModel.fit(), this estimates input normalization
before the weight update, and weight initialization of the torch_module is up to you.
Returns per-epoch losses and validation losses (if any)"""
if self.should_normalize:
sample = []
batch_iter = iter(batches)
n_samples = 0
while n_samples < self.norm_n_samples:
                X_batch, _ = next(batch_iter)
                sample.extend(X_batch)
                n_samples += X_batch.size(0)
sample = stack(sample)
self.estimate_normalization(sample)
return self._update(batches=batches, test_batches=test_batches, max_epochs=max_epochs, min_epochs=min_epochs,
criterion_window=criterion_window,
max_training_time=max_training_time,
batch_report_interval=batch_report_interval, epoch_report_interval=epoch_report_interval)
@training_mode(True)
def _update(self, batches: Iterable[Tuple[TensorType, TensorType]],
test_batches: Opt[Iterable[Tuple[TensorType, TensorType]]]=None,
max_epochs: int = 1, min_epochs: int = 1,
criterion_window: Opt[int] = 5,
max_training_time: Opt[float]=None,
batch_report_interval: Opt[int] = None, epoch_report_interval: Opt[int] = None):
# all training ultimately ends up here
optimizer = self.optimizer
epoch, epoch_loss, epoch_time, epoch_samples, training_time = 0, 0.0, 0.0, 0, 0.0
test_loss, test_samples, best_test_loss, best_model = None, None, float('inf'), None
epoch_losses = []#deque(maxlen=epochs_without_improvement + 1)
if test_batches is not None:
loss_type = 'validation'
test_losses = []#deque(maxlen=epochs_without_improvement + 1)
else:
loss_type = 'training'
test_losses = None
def tail(losses):
if losses is not None:
return losses if criterion_window is None else losses[-min(criterion_window, len(losses)):]
else:
return losses
for epoch in range(1, max_epochs + 1):
epoch_start = time()
if epoch_report_interval and epoch % epoch_report_interval == 0:
self.print("Training epoch {}".format(epoch))
epoch_loss = 0.0
epoch_samples = 0
for i, (X_batch, y_batch) in enumerate(batches, 1):
batch_start = time()
batch_loss, batch_samples = self._batch_inner_block(X_batch, y_batch, optimizer)
batch_time = time() - batch_start
epoch_samples += batch_samples
epoch_loss += batch_loss
if batch_report_interval and i % batch_report_interval == 0:
self.report_batch(epoch, i, batch_loss, batch_samples, batch_time)
epoch_time = time() - epoch_start
epoch_losses.append(epoch_loss / epoch_samples)
training_time += epoch_time
if test_batches is not None:
test_loss, test_samples = self._error(test_batches)
test_losses.append(test_loss / test_samples)
if epoch_report_interval and epoch % epoch_report_interval == 0:
self.report_epoch(epoch, epoch_loss, test_loss, loss_type, epoch_samples, test_samples, epoch_time)
if test_batches is not None and test_loss <= best_test_loss:
self.print("New optimal {} loss; saving parameters".format(loss_type))
best_test_loss = test_loss
best_model = get_torch_object_bytes(self.torch_module)
self.print()
if epoch >= min_epochs and (self.stop_training(tail(epoch_losses), tail(test_losses)) or
(max_training_time is not None and training_time >= max_training_time)):
break
if test_batches is not None:
self.print("Loading parameters of {}-optimal model".format(loss_type))
self.torch_module = load_torch_object_bytes(best_model)
if epoch_report_interval and epoch % epoch_report_interval != 0:
self.report_epoch(epoch, epoch_loss, test_loss, loss_type, epoch_samples, test_samples, epoch_time)
return epoch_losses, test_losses
def _batch_inner_block(self, X_batch, y_batch, optimizer):
# factored out to allow customization for more complex models, e.g. seqence models
batch_samples = X_batch.size(0)
batch_loss = (self._single_batch_train_pass(X_batch, y_batch, optimizer)).data[0]
return batch_loss, batch_samples
# aliases
train = fit
train_zipped = fit_zipped
train_batched = fit_batched
update_batched = _update
def report_epoch(self, epoch: int, epoch_loss: float, test_loss: float, loss_type: str, epoch_samples: int,
test_samples: int, runtime: float):
lossname = self.loss_func.__class__.__name__
test_loss = test_loss or epoch_loss
test_samples = test_samples or epoch_samples
loss_ = round(test_loss/test_samples, 4)
self.print("epoch {}, {} samples, {} {} per sample: {}".format(epoch, epoch_samples, loss_type, lossname, loss_))
t, sample_t = pretty_time(runtime), pretty_time(runtime / epoch_samples)
self.print("Total runtime: {} Runtime per sample: {}".format(t, sample_t))
def report_batch(self, epoch: int, batch: int, batch_loss: float, n_samples: int, runtime: float):
lossname = self.loss_func.__class__.__name__
sample_t = pretty_time(runtime / n_samples)
        loss_ = round(batch_loss / n_samples, 4)
        self.print("epoch {}, batch {}, {} samples, runtime per sample: {}, {} per sample: {}"
                   "".format(epoch, batch, n_samples, sample_t, lossname, loss_))
def stop_training(self, epoch_losses: Iterable[float], test_losses: Opt[Iterable[float]]) -> bool:
if test_losses is not None:
test_losses = list(test_losses)
tup = self.stopping_criterion(list(epoch_losses), test_losses)
if isinstance(tup, tuple):
stop, stop_msg = tup[0:2]
else:
stop, stop_msg = tup, None
if stop:
if stop_msg:
self.print(stop_msg)
return stop
def plot_training_loss(self, training_losses: List[float], validation_losses: Opt[List[float]]=None,
loss_name: Opt[str]=None, model_name: Opt[str]=None,
title: Opt[str]=None, training_marker: str='bo--', validation_marker: str='ro--',
ylim: Opt[Tuple[float, float]]=None,
return_fig: bool=True):
"""Plot training and validation losses as would be returned by a .fit*(...) call.
Pass optional title, markers, loss function name and model name for customization.
If return_fig is True (default), the figure object is returned for further customization, saving to a file,
etc., otherwise the plot is displayed and nothing is returned."""
try:
from matplotlib import pyplot as plt
except Exception as e:
raise e
else:
plt.rcParams['figure.figsize'] = 8, 8
fig, ax = plt.subplots()
loss_name = loss_name or self.loss_func.__class__.__name__
model_name = model_name or self.torch_module.__class__.__name__
x = list(range(1, len(training_losses) + 1))
ax.plot(x, training_losses, training_marker, label="training {}".format(loss_name))
if validation_losses is not None:
ax.plot(x, validation_losses, validation_marker, label="validation {}".format(loss_name))
ax.set_title(title or "{} {} per sample by training epoch".format(model_name, loss_name))
ax.set_xlabel("epoch")
ax.set_ylabel(loss_name)
ax.set_xticks(x)
ax.legend(loc=1)
if ylim is not None:
ax.set_ylim(*ylim)
            if return_fig:
                return fig
            else:
                plt.show(fig)
@training_mode(False)
def error(self, X: Iterable[T1], y: Iterable[T2], batch_size: Opt[int]=None, shuffle: bool=False) -> float:
batch_size = batch_size or self.default_batch_size
dataset = efficient_batch_iterator(X, y, X_encoder=self.encode_input, y_encoder=self.encode_target,
batch_size=batch_size, shuffle=shuffle,
num_workers=self.num_dataloader_workers)
err, n_samples = self._error(dataset)
return err / n_samples
@training_mode(False)
def error_zipped(self, dataset: Iterable[Tuple[T1, T2]], batch_size: Opt[int]=None) -> float:
"""For computing per-sample loss on an iterable sequence of (x,y) pairs, such as may arise in very
large streaming datasets from sources that don't fit the random access and known-length requirements of a
torch.data.Dataset (e.g. a sequence of sentences split from a set of text files as might arise in NLP
applications.
This method handles packaging X and y into a batch iterator of the kind that torch modules expect"""
batch_size = batch_size or self.default_batch_size
data_kw = dict(batch_size=batch_size, classifier=self.is_classifier,
X_encoder=self.encode_input,
y_encoder=self.encode_target)
dataset = TupleIteratorDataLoader(dataset, **data_kw)
err, n_samples = self._error(dataset)
return err / n_samples
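    # Illustrative use of the streaming loss above (names are made up): given an
    # iterator of (sentence, label) pairs, model.error_zipped(pairs) pushes them
    # through the configured encoders without requiring a random-access,
    # known-length torch Dataset.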
@training_mode(False)
def error_batched(self, batches: Iterable[Tuple[TensorType, TensorType]]):
"""For computing loss on an iterable of batch tensor pairs, such as would come from a torch.util.data.DataLoader.
Variables are therefore assumed to be already appropriately encoded, and none of the provided encoders is used.
"""
err, n_samples = self._error(batches)
return err / n_samples
def _error(self, batches: Iterable[Tuple[TensorType, TensorType]]) -> Tuple[float, int]:
running_loss = 0.0
running_samples = 0
for X_batch, y_batch in batches:
err = self._single_batch_test_pass(X_batch, y_batch)
running_loss += err.data[0]
running_samples += X_batch.size()[0]
return running_loss, running_samples
# aliases
loss = error
loss_zipped = error_zipped
loss_batched = error_batched
@training_mode(False)
def predict(self, X: Iterable[Any], batch_size: Opt[int]=None, shuffle: bool=False) -> Iterable[T2]:
batch_size = batch_size or self.default_batch_size
dataset = efficient_batch_iterator(X, X_encoder=self.encode_input, y_encoder=self.encode_input,
batch_size=batch_size, shuffle=shuffle,
num_workers=self.num_dataloader_workers)
return self._predict(dataset)
@training_mode(False)
def predict_batched(self, batches: Iterable[Tuple[TensorType, TensorType]]):
return self._predict(batches)
def _predict(self, batches: Iterable[Tuple[TensorType, TensorType]]) -> Iterable[T2]:
for X_batch, _ in batches:
for output in self._single_batch_forward_pass(X_batch):
yield self.decode_output(output.data)
@staticmethod
def encode_input(X: T1) -> TensorType:
"""encode the input to a tensor that can be fed to the neural net;
this can be passed to the class constructor for customizability, else it is assumed to be the identity."""
return X
@staticmethod
def encode_target(y: T2) -> TensorType:
"""encode the output to a tensor that can be used to compute the error of a neural net prediction;
this can be passed to the class constructor for customizability, else it is assumed to be the identity."""
return y
@staticmethod
def decode_output(y: Iterable[TensorType]) -> T2:
"""take the output Variable from the neural net and decode it to whatever type the training set target was;
this can be passed to the class constructor for customizability, else it is assumed to be the identity."""
return y
def _init_dict(self):
return dict(loss_func = self.loss_func,
loss_func_kwargs = self.loss_func_kwargs,
optimizer = self._optimizer.__name__,
optimizer_kwargs = self.optimizer_kwargs,
input_encoder = self.encode_input,
target_encoder = self.encode_target,
output_decoder = self.decode_output,
is_classifier = self.is_classifier,
estimate_normalization_samples = self.norm_n_samples,
stopping_criterion = self.stopping_criterion,
num_dataloader_workers = self.num_dataloader_workers)
def _state_dict(self):
mean, sd = self._input_mean, self._input_sd
return dict(_input_mean = get_torch_object_bytes(mean) if mean is not None else mean,
_input_sd = get_torch_object_bytes(sd) if sd is not None else sd,
_norm_estimated = self._norm_estimated)
def save(self, path: Union[str, IO]):
state = self.__getstate__()
with open_file(path, 'wb') as outfile:
pickle.dump(state, outfile)
@classmethod
def load(cls, path: Union[str, IO]) -> 'TorchModel':
with open_file(path, 'rb') as infile:
state = pickle.load(infile)
model = cls.__new__(cls)
model.__setstate__(state)
return model
# for using pickle.dump/load directly
def __getstate__(self):
return (self._init_dict(), self._state_dict(), get_torch_object_bytes(self.torch_module))
def __setstate__(self, state):
init_dict, state_dict, torch_bytes = state
module = load_torch_object_bytes(torch_bytes)
self.__init__(torch_module=module, **init_dict)
for k, v in state_dict.items():
value = v if not isinstance(v, bytes) else load_torch_object_bytes(v)
self.__dict__.__setitem__(k, value)
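# Minimal usage sketch of the persistence round-trip above (illustrative names;
# assumes a trained `model` instance of TorchModel and a writable path):
#   model.save('model.pkl')                  # pickles (_init_dict(), _state_dict(), torch bytes)
#   restored = TorchModel.load('model.pkl')  # __setstate__ re-runs __init__ and restores tensors
#   predictions = list(restored.predict(new_inputs))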
class TorchClassifierModel(TorchModel):
"""Wrapper class to handle encoding inputs to pytorch variables, managing transfer to/from the GPU,
handling train/eval mode, etc."""
def __init__(self, torch_module: nn.Module, loss_func: Union[loss._Loss, type, str],
optimizer: Union[str, optim.Optimizer],
classes: List[T2],
loss_func_kwargs: Opt[dict]=None,
optimizer_kwargs: Opt[dict]=None,
input_encoder: Opt[Callable[[T1], TensorType]]=None,
estimate_normalization_samples: Opt[int]=None,
default_batch_size: int=DEFAULT_BATCH_SIZE,
stopping_criterion: Callable[[List[float], Opt[List[float]]], Union[bool, Tuple[bool, Opt[str]]]]=
DEFAULT_STOPPING_CRITERION,
print_func: Callable[[Any], None]=print,
num_dataloader_workers: int=-2):
class_to_int = dict(zip(classes, range(len(classes))))
int_to_class = dict(map(reversed, class_to_int.items()))
target_encoder = class_to_int.__getitem__
self.class_to_int = class_to_int
self.int_to_class = int_to_class
self.num_classes = len(class_to_int)
super(TorchClassifierModel, self).__init__(torch_module=torch_module, loss_func=loss_func, optimizer=optimizer,
loss_func_kwargs=loss_func_kwargs, optimizer_kwargs=optimizer_kwargs,
input_encoder=input_encoder, target_encoder=target_encoder,
output_decoder=self._get_classes,
is_classifier=True,
estimate_normalization_samples=estimate_normalization_samples,
default_batch_size=default_batch_size,
stopping_criterion=stopping_criterion,
print_func=print_func, num_dataloader_workers=num_dataloader_workers)
def _get_classes(self, preds: FloatTensorType):
# works for a batch or a single instance
dim = preds.ndimension()
decode = self.int_to_class.__getitem__
if dim == 2:
ids = preds.max(1)[1].squeeze(1)
return list(map(decode, ids))
elif dim == 1:
return decode(preds.max(0)[1][0])
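    # Illustrative only: with classes=['neg', 'pos'], class_to_int == {'neg': 0, 'pos': 1};
    # a 2-D batch of scores decodes row-wise via argmax back to labels, e.g.
    # [[0.2, 0.8], [0.9, 0.1]] -> ['pos', 'neg'], while a 1-D tensor decodes to a single label.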
class TorchSequenceModel(TorchModel):
def __init__(self, torch_module: nn.Module, loss_func: loss._Loss,
optimizer: optim.Optimizer,
loss_func_kwargs: Opt[dict]=None,
optimizer_kwargs: Opt[dict]=None,
input_encoder: Opt[Callable[[T1], TensorType]]=None,
target_encoder: Opt[Callable[[T2], TensorType]]=None,
output_decoder: Opt[Callable[[TensorType], T2]]=None,
clip_grad_norm: Opt[float]=None,
is_classifier: bool=False,
flatten_targets: bool=True,
flatten_output: bool=True,
bptt_len: int=20,
estimate_normalization_samples: Opt[int]=None,
default_batch_size: int=DEFAULT_BATCH_SIZE,
stopping_criterion: Callable[[List[float], Opt[List[float]]], Union[bool, Tuple[bool, Opt[str]]]] =
DEFAULT_STOPPING_CRITERION,
print_func: Callable[[Any], None]=print, num_dataloader_workers: int=-2):
super(TorchSequenceModel, self).__init__(torch_module=torch_module, loss_func=loss_func, optimizer=optimizer,
loss_func_kwargs=loss_func_kwargs, optimizer_kwargs=optimizer_kwargs,
input_encoder=input_encoder,
target_encoder=target_encoder, output_decoder=output_decoder,
is_classifier=is_classifier,
estimate_normalization_samples=estimate_normalization_samples,
default_batch_size=default_batch_size,
stopping_criterion=stopping_criterion,
print_func=print_func, num_dataloader_workers=num_dataloader_workers)
self.flatten_targets = flatten_targets
self.flatten_output = flatten_output
self.clip_grad_norm = clip_grad_norm
self.bptt_len = bptt_len
@property
def clip_grad(self):
return self.clip_grad_norm is not None
def _single_batch_train_pass(self, X_batch: TensorType, y_batch: TensorType, optimizer: optim.Optimizer):
module = self.torch_module
optimizer.zero_grad()
err = self._single_batch_test_pass(X_batch, y_batch)
err.backward()
if self.clip_grad:
clip_grad_norm(module.parameters(), self.clip_grad_norm)
optimizer.step()
return err
def _single_batch_test_pass(self, X_batch: TensorType, y_batch: TensorType):
y_batch = self.prepare_target(y_batch)
if self.flatten_targets:
y_batch = y_batch.view(-1)
output = self._single_batch_forward_pass(X_batch)
output = self._flatten_output(output)
err = self.loss_func(output, y_batch)
return err
def _flatten_output(self, output: TensorType):
size = output.size()
if len(size) == 3 and self.flatten_output:
output = output.view(size[0]*size[1], size[2])
elif len(size) != 2:
raise ValueError("Output of torch_module.forward() must be 2 or 3 dimensional, "
"corresponding to either (batch*seq, vocab) or (batch, seq, vocab)")
return output
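    # Shape sketch for _flatten_output (illustrative): a sequence model emitting
    # (batch=8, seq=20, vocab=1000) is reshaped to (160, 1000) so that a target
    # flattened to length 160 can be scored by a cross-entropy style loss.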
def _single_batch_forward_pass(self, X_batch: TensorType):
X_batch = self.prepare_input(X_batch)
output = self.torch_module(X_batch)
if isinstance(output, tuple):
output = output[0]
return output
def estimate_normalization(self, sample: Union[FloatTensorType, ndarray]):
if isinstance(sample, FloatTensorTypes):
sample = sample.numpy()
sample = sample[0:self.norm_n_samples]
# statistics along both batch and sequence axes; this functionality is why we need numpy
mean = from_numpy(sample.mean((0,1)))
sd = from_numpy(sample.std((0,1)))
self._input_mean = mean.cuda() if self.gpu_enabled else mean.cpu()
self._input_sd = sd.cuda() if self.gpu_enabled else sd.cpu()
self._norm_estimated = True
# note: TorchModel.normalize should still work since Tensor.expand_as does what we hope it would do
def _predict(self, batches: Iterable[Tuple[TensorType, TensorType]]) -> Iterable[T2]:
# each X_batch is assumed to be of shape (batch, seq) or (batch, seq, features)
for X_batch, _ in batches:
for output in self._single_batch_forward_pass(X_batch):
yield self.decode_output(output.data)
def _init_dict(self):
d = super(TorchSequenceModel, self)._init_dict()
d['flatten_targets'] = self.flatten_targets
d['flatten_output'] = self.flatten_output
d['clip_grad_norm'] = self.clip_grad_norm
d['bptt_len'] = self.bptt_len
return d
| mit |
mifads/pyscripts | emxgeo/htap_regions.py | 1 | 3358 | #!/usr/bin/env python3
""" codes
   16, 17 = Arctic areas
2 = sea
3 = NAM, 4=EUR
"""
import numpy as np
import sys
import xarray as xr
import matplotlib.pyplot as plt
import matplotlib as mpl
hdir='/home/davids/Data/HTAP_v2/'
ds=xr.open_dataset(hdir+'HTAP_Phase2_tier1NC05x05_v2.nc') #receptorNC.nc')
xlons=ds.long.data # 0.25 .. 359.75
lons=np.roll(xlons,360)
for i, lon in enumerate(lons):
if lon > 180:
lons[i] = lon-360 # now -179.75 .. 179.75
lats=ds.lat.data # 89.75 .. -89.75
codes=ds.region_code.values.astype(int)
codes=np.roll(codes,360,axis=1)
lats=np.flipud(lats) # now -89.75 .. 89.75
codes=np.flipud(codes)
uniq = np.unique(codes)
#print(uniq) # 2..17
dlon=lons[1]-lons[0]
dlat=lats[1]-lats[0]
lon0 = lons[0]-0.5*dlon
lat0 = lats[0]-0.5*dlat
regions=dict(NAM=3, EUR=4, SAS=5, EAS=6, PAN=8, NAF=9, SAF=10, SAM=13, RBU=14, GLOB=999 )
regLL=dict(
NAM=[-105.,48.0], EUR=[0.0,45.0], SAS=[75.0,20.0], EAS=[110.0,30.0],
PAN=[152.0,-38.0], NAF=[0.0,20.0], SAF=[20.0,-15.0],
SAM=[-60.0,-15.0], RBU=[90.0,60.0] )
def getRegionMasks(reg):
print(regions[reg])
if reg == 'GLOB':
out=np.full_like(codes,1)
else:
out=np.full_like(codes,0.0)
mask = ( codes == regions[reg] )
out[mask] = 1
#print('OUT ', out[2,2], out[300,300] )
return out
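# Illustrative use: getRegionMasks('EUR') returns a 0/1 array on the same 0.5x0.5
# degree grid as `codes`, which can be applied as a multiplicative mask to any field
# defined on that grid; getRegionMasks('GLOB') returns an all-ones mask.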
def getNearestRegion(xlat,xlon):
j = int( (xlat-lat0)/dlat )
i = int( (xlon-lon0)/dlon )
#print('CODES', j, i, xlat, lat0, dlat, xlon,lon0, dlon, codes.shape, codes[j,i])
return codes[j,i]
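# Worked example of the index arithmetic above (illustrative values): with
# lat0=-90.0, dlat=0.5, lon0=-180.0 and dlon=0.5, the point (xlat=45.1, xlon=10.3)
# maps to j=int((45.1+90)/0.5)=270 and i=int((10.3+180)/0.5)=380, i.e. codes[270, 380].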
def get_cmaps(ctest):
v = [ i-0.5 for i in range(1,19) ]
cmap= plt.cm.get_cmap(ctest,len(v))
#cmap.set_under('0.15')
#cmap.set_over('0.15')
norm=mpl.colors.BoundaryNorm(v, cmap.N)
ncodes=codes.copy()
unset = 20
undef=ncodes< 3
ncodes[undef] = unset
undef=ncodes> 15
ncodes[undef] = unset
return ncodes, cmap, norm, v
def plotRegions():
  ncodes, cmap, norm, v = get_cmaps('tab20')  # get_cmaps requires a colormap name; 'tab20' matches the __main__ default
plt.pcolormesh(ncodes,cmap=cmap,norm=norm) # fails:,extend='both')
plt.colorbar()
plt.ylim(ymin=100)
plt.show()
def plotCRegions(map):
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from matplotlib.gridspec import GridSpec #see http://worksofscience.net/matplotlib/colorbar
ncodes, cmap, norm, v = get_cmaps(map)
#proj=ccrs.PlateCarree()
fig=plt.figure(figsize=[16,12])
#gs = GridSpec(100,100,bottom=0.05,left=0.05,right=0.88)
proj_used='Mercator'
proj_used='PlateCarree'
if proj_used=='Mercator':
ax=plt.axes(projection=ccrs.Mercator())
else:
ax=plt.axes(projection=ccrs.PlateCarree())
#ax1=fig.add_subplot(gs[:,0:85],projection=proj)
ax.contourf(lons,lats,ncodes,v,cmap=cmap,norm=norm) #,boundaries=v)
for reg, coords in regLL.items():
x=coords[0]; y=coords[1]
print(reg, x,y)
ax.text(x,y,reg,bbox=dict(facecolor='yellow')) # , alpha=0.5))
#ax1.pcolormesh(ncodes,cmap=cmap,norm=norm)
#ax1.set_ylim(ymin=100)
ax.coastlines(resolution='10m')
ax.add_feature(cfeature.BORDERS)
ax.add_feature(cfeature.COASTLINE)
  plt.tight_layout()
#plt.show()
plt.savefig('PlotHTAP_Regions_%s_%s.png' % (proj_used, map ))
if __name__ == '__main__':
import matplotlib.pyplot as plt
for map in 'tab20 tab20c Paired'.split():
p=plotCRegions(map)
sys.exit()
for reg in 'EUR GLOB'.split():
m=getRegionMasks(reg)
#plt.pcolormesh(m)
#plt.show()
#plt.clf()
| gpl-3.0 |
syedjafri/ThinkStats2 | code/chap12soln.py | 68 | 4459 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import thinkplot
import thinkstats2
import regression
import timeseries
def RunQuadraticModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
daily['years2'] = daily.years**2
model = smf.ols('ppg ~ years + years2', data=daily)
results = model.fit()
return model, results
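# The fitted quadratic has the form ppg = b0 + b1*years + b2*years**2; with
# hypothetical coefficients b0=13.4, b1=-1.2 and b2=0.11, the predicted price after
# 2 years would be 13.4 - 1.2*2 + 0.11*4 = 11.44 dollars per gram.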
def PlotQuadraticModel(daily, name):
"""
"""
model, results = RunQuadraticModel(daily)
regression.SummarizeResults(results)
timeseries.PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries11',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)')
timeseries.PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries12',
title='residuals',
xlabel='years',
ylabel='price per gram ($)')
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
timeseries.PlotPredictions(daily, years, func=RunQuadraticModel)
thinkplot.Save(root='timeseries13',
title='predictions',
xlabel='years',
xlim=[years[0]-0.1, years[-1]+0.1],
ylabel='price per gram ($)')
def PlotEwmaPredictions(daily, name):
"""
"""
# use EWMA to estimate slopes
filled = timeseries.FillMissing(daily)
filled['slope'] = pandas.ewma(filled.ppg.diff(), span=180)
filled[-1:]
# extract the last inter and slope
start = filled.index[-1]
inter = filled.ewma[-1]
slope = filled.slope[-1]
# reindex the DataFrame, adding a year to the end
dates = pandas.date_range(filled.index.min(),
filled.index.max() + np.timedelta64(365, 'D'))
predicted = filled.reindex(dates)
# generate predicted values and add them to the end
predicted['date'] = predicted.index
one_day = np.timedelta64(1, 'D')
predicted['days'] = (predicted.date - start) / one_day
predict = inter + slope * predicted.days
predicted.ewma.fillna(predict, inplace=True)
# plot the actual values and predictions
thinkplot.Scatter(daily.ppg, alpha=0.1, label=name)
thinkplot.Plot(predicted.ewma)
thinkplot.Save()
class SerialCorrelationTest(thinkstats2.HypothesisTest):
"""Tests serial correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
series, lag = data
test_stat = abs(thinkstats2.SerialCorr(series, lag))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
series, lag = self.data
permutation = series.reindex(np.random.permutation(series.index))
return permutation, lag
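# Illustrative reading of the test above: PValue() estimates how often a random
# permutation of the series produces a lag-1 serial correlation at least as large
# (in absolute value) as the observed one; a small p-value suggests genuine serial
# dependence rather than chance.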
def TestSerialCorr(daily):
"""Tests serial correlations in daily prices and their residuals.
daily: DataFrame of daily prices
"""
# test the correlation between consecutive prices
series = daily.ppg
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the linear model
_, results = timeseries.RunLinearModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the quadratic model
_, results = RunQuadraticModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
def main(name):
transactions = timeseries.ReadData()
dailies = timeseries.GroupByQualityAndDay(transactions)
name = 'high'
daily = dailies[name]
PlotQuadraticModel(daily, name)
TestSerialCorr(daily)
PlotEwmaPredictions(daily, name)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
RRCKI/panda-jedi | pandajedi/jedirefine/TaskRefinerBase.py | 1 | 38175 | import re
import sys
import uuid
import copy
import types
import datetime
import RefinerUtils
from pandajedi.jedicore import Interaction
from pandajedi.jedicore import JediException
from pandajedi.jedicore.JediTaskSpec import JediTaskSpec
from pandajedi.jedicore.JediDatasetSpec import JediDatasetSpec
from pandajedi.jedicore.JediFileSpec import JediFileSpec
from pandaserver.taskbuffer import EventServiceUtils
# base class for task refine
class TaskRefinerBase (object):
# constructor
def __init__(self,taskBufferIF,ddmIF):
self.ddmIF = ddmIF
self.taskBufferIF = taskBufferIF
self.initializeRefiner(None)
self.refresh()
# refresh
def refresh(self):
self.siteMapper = self.taskBufferIF.getSiteMapper()
# initialize
def initializeRefiner(self,tmpLog):
self.taskSpec = None
self.inMasterDatasetSpec = []
self.inSecDatasetSpecList = []
self.outDatasetSpecList = []
self.outputTemplateMap = {}
self.jobParamsTemplate = None
self.cloudName = None
self.siteName = None
self.tmpLog = tmpLog
self.updatedTaskParams = None
self.unmergeMasterDatasetSpec = {}
self.unmergeDatasetSpecMap = {}
self.oldTaskStatus = None
self.unknownDatasetList = []
# set jobParamsTemplate
def setJobParamsTemplate(self,jobParamsTemplate):
self.jobParamsTemplate = jobParamsTemplate
# extract common parameters
def extractCommon(self,jediTaskID,taskParamMap,workQueueMapper,splitRule):
# make task spec
taskSpec = JediTaskSpec()
taskSpec.jediTaskID = jediTaskID
taskSpec.taskName = taskParamMap['taskName']
taskSpec.userName = taskParamMap['userName']
taskSpec.vo = taskParamMap['vo']
taskSpec.prodSourceLabel = taskParamMap['prodSourceLabel']
taskSpec.taskPriority = taskParamMap['taskPriority']
taskSpec.currentPriority = taskSpec.taskPriority
taskSpec.architecture = taskParamMap['architecture']
taskSpec.transUses = taskParamMap['transUses']
taskSpec.transHome = taskParamMap['transHome']
taskSpec.transPath = taskParamMap['transPath']
taskSpec.processingType = taskParamMap['processingType']
taskSpec.taskType = taskParamMap['taskType']
taskSpec.splitRule = splitRule
taskSpec.startTime = datetime.datetime.utcnow()
if taskParamMap.has_key('workingGroup'):
taskSpec.workingGroup = taskParamMap['workingGroup']
if taskParamMap.has_key('countryGroup'):
taskSpec.countryGroup = taskParamMap['countryGroup']
if taskParamMap.has_key('ticketID'):
taskSpec.ticketID = taskParamMap['ticketID']
if taskParamMap.has_key('ticketSystemType'):
taskSpec.ticketSystemType = taskParamMap['ticketSystemType']
if taskParamMap.has_key('reqID'):
taskSpec.reqID = taskParamMap['reqID']
else:
taskSpec.reqID = jediTaskID
if taskParamMap.has_key('coreCount'):
taskSpec.coreCount = taskParamMap['coreCount']
else:
taskSpec.coreCount = 1
if taskParamMap.has_key('walltime'):
taskSpec.walltime = taskParamMap['walltime']
else:
taskSpec.walltime = 0
if not taskParamMap.has_key('walltimeUnit'):
# force to set NULL so that retried tasks get data from scouts again
taskSpec.forceUpdate('walltimeUnit')
if taskParamMap.has_key('outDiskCount'):
taskSpec.outDiskCount = taskParamMap['outDiskCount']
else:
taskSpec.outDiskCount = 0
if 'outDiskUnit' in taskParamMap:
taskSpec.outDiskUnit = taskParamMap['outDiskUnit']
if taskParamMap.has_key('workDiskCount'):
taskSpec.workDiskCount = taskParamMap['workDiskCount']
else:
taskSpec.workDiskCount = 0
if taskParamMap.has_key('workDiskUnit'):
taskSpec.workDiskUnit = taskParamMap['workDiskUnit']
if taskParamMap.has_key('ramCount'):
taskSpec.ramCount = taskParamMap['ramCount']
else:
taskSpec.ramCount = 0
if taskParamMap.has_key('ramUnit'):
taskSpec.ramUnit = taskParamMap['ramUnit']
if taskParamMap.has_key('baseRamCount'):
taskSpec.baseRamCount = taskParamMap['baseRamCount']
else:
taskSpec.baseRamCount = 0
# HS06 stuff
if 'cpuTimeUnit' in taskParamMap:
taskSpec.cpuTimeUnit = taskParamMap['cpuTimeUnit']
if 'cpuTime' in taskParamMap:
taskSpec.cpuTime = taskParamMap['cpuTime']
if 'cpuEfficiency' in taskParamMap:
taskSpec.cpuEfficiency = taskParamMap['cpuEfficiency']
else:
# 90% of cpu efficiency by default
taskSpec.cpuEfficiency = 90
if 'baseWalltime' in taskParamMap:
taskSpec.baseWalltime = taskParamMap['baseWalltime']
else:
# 10min of offset by default
taskSpec.baseWalltime = 10*60
# for merge
if 'mergeRamCount' in taskParamMap:
taskSpec.mergeRamCount = taskParamMap['mergeRamCount']
if 'mergeCoreCount' in taskParamMap:
taskSpec.mergeCoreCount = taskParamMap['mergeCoreCount']
# scout
if not taskParamMap.has_key('skipScout') and not taskSpec.isPostScout():
taskSpec.setUseScout(True)
# cloud
if taskParamMap.has_key('cloud'):
self.cloudName = taskParamMap['cloud']
taskSpec.cloud = self.cloudName
else:
# set dummy to force update
taskSpec.cloud = 'dummy'
taskSpec.cloud = None
# site
if taskParamMap.has_key('site'):
self.siteName = taskParamMap['site']
taskSpec.site = self.siteName
else:
# set dummy to force update
taskSpec.site = 'dummy'
taskSpec.site = None
# nucleus
if 'nucleus' in taskParamMap:
taskSpec.nucleus = taskParamMap['nucleus']
# preset some parameters for job cloning
if 'useJobCloning' in taskParamMap:
# set implicit parameters
if not 'nEventsPerWorker' in taskParamMap:
taskParamMap['nEventsPerWorker'] = 1
if not 'nSitesPerJob' in taskParamMap:
taskParamMap['nSitesPerJob'] = 2
if not 'nEsConsumers' in taskParamMap:
taskParamMap['nEsConsumers'] = taskParamMap['nSitesPerJob']
# event service flag
if 'useJobCloning' in taskParamMap:
taskSpec.eventService = 2
elif taskParamMap.has_key('nEventsPerWorker'):
taskSpec.eventService = 1
else:
taskSpec.eventService = 0
# ttcr: requested time to completion
if taskParamMap.has_key('ttcrTimestamp'):
try:
# get rid of the +00:00 timezone string and parse the timestamp
taskSpec.ttcRequested = datetime.datetime.strptime(taskParamMap['ttcrTimestamp'].split('+')[0], '%Y-%m-%d %H:%M:%S.%f')
except (IndexError, ValueError):
pass
# goal
if 'goal' in taskParamMap:
try:
taskSpec.goal = int(float(taskParamMap['goal'])*10)
if taskSpec.goal >= 1000:
taskSpec.goal = None
except:
pass
# campaign
if taskParamMap.has_key('campaign'):
taskSpec.campaign = taskParamMap['campaign']
# request type
if 'requestType' in taskParamMap:
taskSpec.requestType = taskParamMap['requestType']
self.taskSpec = taskSpec
# set split rule
if 'tgtNumEventsPerJob' in taskParamMap:
# set nEventsPerJob not respect file boundaries when nFilesPerJob is not used
if not 'nFilesPerJob' in taskParamMap:
self.setSplitRule(None,taskParamMap['tgtNumEventsPerJob'],JediTaskSpec.splitRuleToken['nEventsPerJob'])
self.setSplitRule(taskParamMap,'nFilesPerJob', JediTaskSpec.splitRuleToken['nFilesPerJob'])
self.setSplitRule(taskParamMap,'nEventsPerJob', JediTaskSpec.splitRuleToken['nEventsPerJob'])
self.setSplitRule(taskParamMap,'nGBPerJob', JediTaskSpec.splitRuleToken['nGBPerJob'])
self.setSplitRule(taskParamMap,'nMaxFilesPerJob', JediTaskSpec.splitRuleToken['nMaxFilesPerJob'])
self.setSplitRule(taskParamMap,'nEventsPerWorker', JediTaskSpec.splitRuleToken['nEventsPerWorker'])
self.setSplitRule(taskParamMap,'useLocalIO', JediTaskSpec.splitRuleToken['useLocalIO'])
self.setSplitRule(taskParamMap,'disableAutoRetry', JediTaskSpec.splitRuleToken['disableAutoRetry'])
self.setSplitRule(taskParamMap,'nEsConsumers', JediTaskSpec.splitRuleToken['nEsConsumers'])
self.setSplitRule(taskParamMap,'waitInput', JediTaskSpec.splitRuleToken['waitInput'])
self.setSplitRule(taskParamMap,'addNthFieldToLFN', JediTaskSpec.splitRuleToken['addNthFieldToLFN'])
self.setSplitRule(taskParamMap,'scoutSuccessRate', JediTaskSpec.splitRuleToken['scoutSuccessRate'])
self.setSplitRule(taskParamMap,'t1Weight', JediTaskSpec.splitRuleToken['t1Weight'])
self.setSplitRule(taskParamMap,'maxAttemptES', JediTaskSpec.splitRuleToken['maxAttemptES'])
self.setSplitRule(taskParamMap,'nSitesPerJob', JediTaskSpec.splitRuleToken['nSitesPerJob'])
self.setSplitRule(taskParamMap,'nJumboJobs', JediTaskSpec.splitRuleToken['nJumboJobs'])
self.setSplitRule(taskParamMap,'nEventsPerMergeJob', JediTaskSpec.splitRuleToken['nEventsPerMergeJob'])
self.setSplitRule(taskParamMap,'nFilesPerMergeJob', JediTaskSpec.splitRuleToken['nFilesPerMergeJob'])
self.setSplitRule(taskParamMap,'nGBPerMergeJob', JediTaskSpec.splitRuleToken['nGBPerMergeJob'])
self.setSplitRule(taskParamMap,'nMaxFilesPerMergeJob', JediTaskSpec.splitRuleToken['nMaxFilesPerMergeJob'])
if taskParamMap.has_key('loadXML'):
self.setSplitRule(None,3,JediTaskSpec.splitRuleToken['loadXML'])
self.setSplitRule(None,4,JediTaskSpec.splitRuleToken['groupBoundaryID'])
if taskParamMap.has_key('pfnList'):
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['pfnList'])
if taskParamMap.has_key('noWaitParent') and taskParamMap['noWaitParent'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['noWaitParent'])
if 'respectLB' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['respectLB'])
if taskParamMap.has_key('reuseSecOnDemand'):
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['reuseSecOnDemand'])
if 'ddmBackEnd' in taskParamMap:
self.taskSpec.setDdmBackEnd(taskParamMap['ddmBackEnd'])
if 'disableReassign' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['disableReassign'])
if 'allowPartialFinish' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['allowPartialFinish'])
if 'useExhausted' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['useExhausted'])
if 'useRealNumEvents' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['useRealNumEvents'])
if 'ipConnectivity' in taskParamMap:
self.taskSpec.setIpConnectivity(taskParamMap['ipConnectivity'])
if 'altStageOut' in taskParamMap:
self.taskSpec.setAltStageOut(taskParamMap['altStageOut'])
if 'allowInputLAN' in taskParamMap:
self.taskSpec.setAllowInputLAN(taskParamMap['allowInputLAN'])
if 'runUntilClosed' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['runUntilClosed'])
if 'stayOutputOnSite' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['stayOutputOnSite'])
if 'useJobCloning' in taskParamMap:
scValue = EventServiceUtils.getJobCloningValue(taskParamMap['useJobCloning'])
self.setSplitRule(None,scValue,JediTaskSpec.splitRuleToken['useJobCloning'])
if 'failWhenGoalUnreached' in taskParamMap and taskParamMap['failWhenGoalUnreached'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['failGoalUnreached'])
if 'switchEStoNormal' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['switchEStoNormal'])
if 'nEventsPerRange' in taskParamMap:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['dynamicNumEvents'])
if 'allowInputWAN' in taskParamMap and taskParamMap['allowInputWAN'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['allowInputWAN'])
if 'putLogToOS' in taskParamMap and taskParamMap['putLogToOS'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['putLogToOS'])
if 'mergeEsOnOS' in taskParamMap and taskParamMap['mergeEsOnOS'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['mergeEsOnOS'])
if 'writeInputToFile' in taskParamMap and taskParamMap['writeInputToFile'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['writeInputToFile'])
if 'useFileAsSourceLFN' in taskParamMap and taskParamMap['useFileAsSourceLFN'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['useFileAsSourceLFN'])
if 'ignoreMissingInDS' in taskParamMap and taskParamMap['ignoreMissingInDS'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['ignoreMissingInDS'])
# work queue
workQueue = None
if 'workQueueName' in taskParamMap:
# work queue is specified
workQueue = workQueueMapper.getQueueWithName(taskSpec.vo,taskSpec.prodSourceLabel,taskParamMap['workQueueName'])
if workQueue is None:
# get work queue based on task attributes
workQueue,tmpStr = workQueueMapper.getQueueWithSelParams(taskSpec.vo,
taskSpec.prodSourceLabel,
processingType=taskSpec.processingType,
workingGroup=taskSpec.workingGroup,
coreCount=taskSpec.coreCount,
site=taskSpec.site,
eventService=taskSpec.eventService,
splitRule=taskSpec.splitRule,
campaign=taskSpec.campaign)
if workQueue is None:
errStr = 'workqueue is undefined for vo={0} label={1} '.format(taskSpec.vo,taskSpec.prodSourceLabel)
errStr += 'processingType={0} workingGroup={1} coreCount={2} eventService={3} '.format(taskSpec.processingType,
taskSpec.workingGroup,
taskSpec.coreCount,
taskSpec.eventService)
errStr += 'splitRule={0} campaign={1}'.format(taskSpec.splitRule,taskSpec.campaign)
raise RuntimeError,errStr
self.taskSpec.workQueue_ID = workQueue.queue_id
# Initialize the global share
gshare = None
if 'gshare' in taskParamMap and self.taskBufferIF.is_valid_share(taskParamMap['gshare']):
# work queue is specified
gshare = taskParamMap['gshare']
else:
# get share based on definition
gshare = self.taskBufferIF.get_share_for_task(self.taskSpec)
if gshare is None:
gshare = 'No match'
# errStr = 'share is undefined for vo={0} label={1} '.format(taskSpec.vo,taskSpec.prodSourceLabel)
# errStr += 'workingGroup={0} campaign={1} '.format(taskSpec.workingGroup, taskSpec.campaign)
# raise RuntimeError,errStr
self.taskSpec.gshare = gshare
# return
return
# basic refinement procedure
def doBasicRefine(self,taskParamMap):
# get input/output/log dataset specs
nIn = 0
nOutMap = {}
if isinstance(taskParamMap['log'],dict):
itemList = taskParamMap['jobParameters'] + [taskParamMap['log']]
else:
itemList = taskParamMap['jobParameters'] + taskParamMap['log']
# pseudo input
if taskParamMap.has_key('noInput') and taskParamMap['noInput'] == True:
tmpItem = {}
tmpItem['type'] = 'template'
tmpItem['value'] = ''
tmpItem['dataset'] = 'pseudo_dataset'
tmpItem['param_type'] = 'pseudo_input'
itemList = [tmpItem] + itemList
# random seed
if RefinerUtils.useRandomSeed(taskParamMap):
tmpItem = {}
tmpItem['type'] = 'template'
tmpItem['value'] = ''
tmpItem['dataset'] = 'RNDMSEED'
tmpItem['param_type'] = 'random_seed'
itemList.append(tmpItem)
# loop over all items
allDsList = []
for tmpItem in itemList:
# look for datasets
if tmpItem['type'] == 'template' and tmpItem.has_key('dataset'):
# avoid duplication
if not tmpItem['dataset'] in allDsList:
allDsList.append(tmpItem['dataset'])
else:
continue
datasetSpec = JediDatasetSpec()
datasetSpec.datasetName = tmpItem['dataset']
datasetSpec.jediTaskID = self.taskSpec.jediTaskID
datasetSpec.type = tmpItem['param_type']
if tmpItem.has_key('container'):
datasetSpec.containerName = tmpItem['container']
if tmpItem.has_key('token'):
datasetSpec.storageToken = tmpItem['token']
if tmpItem.has_key('destination'):
datasetSpec.destination = tmpItem['destination']
if tmpItem.has_key('attributes'):
datasetSpec.setDatasetAttribute(tmpItem['attributes'])
if tmpItem.has_key('ratio'):
datasetSpec.setDatasetAttribute('ratio={0}'.format(tmpItem['ratio']))
if tmpItem.has_key('eventRatio'):
datasetSpec.setEventRatio(tmpItem['eventRatio'])
if tmpItem.has_key('check'):
datasetSpec.setDatasetAttribute('cc')
if tmpItem.has_key('usedup'):
datasetSpec.setDatasetAttribute('ud')
if tmpItem.has_key('random'):
datasetSpec.setDatasetAttribute('rd')
if tmpItem.has_key('reusable'):
datasetSpec.setDatasetAttribute('ru')
if tmpItem.has_key('offset'):
datasetSpec.setOffset(tmpItem['offset'])
if tmpItem.has_key('allowNoOutput'):
datasetSpec.allowNoOutput()
if tmpItem.has_key('nFilesPerJob'):
datasetSpec.setNumFilesPerJob(tmpItem['nFilesPerJob'])
if tmpItem.has_key('num_records'):
datasetSpec.setNumRecords(tmpItem['num_records'])
if 'transient' in tmpItem:
datasetSpec.setTransient(tmpItem['transient'])
datasetSpec.vo = self.taskSpec.vo
datasetSpec.nFiles = 0
datasetSpec.nFilesUsed = 0
datasetSpec.nFilesFinished = 0
datasetSpec.nFilesFailed = 0
datasetSpec.nFilesOnHold = 0
datasetSpec.nEvents = 0
datasetSpec.nEventsUsed = 0
datasetSpec.nEventsToBeUsed = 0
datasetSpec.status = 'defined'
if datasetSpec.type in JediDatasetSpec.getInputTypes() + ['random_seed']:
datasetSpec.streamName = RefinerUtils.extractStreamName(tmpItem['value'])
if not tmpItem.has_key('expandedList'):
tmpItem['expandedList'] = []
# dataset names could be comma-concatenated
datasetNameList = datasetSpec.datasetName.split(',')
# datasets could be added by incexec
incexecDS = 'dsFor{0}'.format(datasetSpec.streamName)
# remove /XYZ
incexecDS = incexecDS.split('/')[0]
if taskParamMap.has_key(incexecDS):
for tmpDatasetName in taskParamMap[incexecDS].split(','):
if not tmpDatasetName in datasetNameList:
datasetNameList.append(tmpDatasetName)
# loop over all dataset names
inDatasetSpecList = []
for datasetName in datasetNameList:
# skip empty
if datasetName == '':
continue
# expand
if datasetSpec.isPseudo() or datasetSpec.type in ['random_seed'] or datasetName == 'DBR_LATEST':
# pseudo input
tmpDatasetNameList = [datasetName]
elif tmpItem.has_key('expand') and tmpItem['expand'] == True:
# expand dataset container
tmpDatasetNameList = self.ddmIF.getInterface(self.taskSpec.vo).expandContainer(datasetName)
else:
# normal dataset name
tmpDatasetNameList = self.ddmIF.getInterface(self.taskSpec.vo).listDatasets(datasetName)
for elementDatasetName in tmpDatasetNameList:
if nIn > 0 or not elementDatasetName in tmpItem['expandedList']:
tmpItem['expandedList'].append(elementDatasetName)
inDatasetSpec = copy.copy(datasetSpec)
inDatasetSpec.datasetName = elementDatasetName
inDatasetSpec.containerName = datasetName
inDatasetSpecList.append(inDatasetSpec)
# empty input
if inDatasetSpecList == [] and self.oldTaskStatus != 'rerefine':
errStr = 'doBasicRefine : unknown input dataset "{0}"'.format(datasetSpec.datasetName)
self.taskSpec.setErrDiag(errStr)
if not datasetSpec.datasetName in self.unknownDatasetList:
self.unknownDatasetList.append(datasetSpec.datasetName)
raise JediException.UnknownDatasetError,errStr
# set master flag
for inDatasetSpec in inDatasetSpecList:
if nIn == 0:
# master
self.inMasterDatasetSpec.append(inDatasetSpec)
else:
# secondary
self.inSecDatasetSpecList.append(inDatasetSpec)
nIn += 1
continue
if datasetSpec.type in ['output','log']:
if not nOutMap.has_key(datasetSpec.type):
nOutMap[datasetSpec.type] = 0
# make stream name
datasetSpec.streamName = "{0}{1}".format(datasetSpec.type.upper(),nOutMap[datasetSpec.type])
nOutMap[datasetSpec.type] += 1
# set attribute for event service
if self.taskSpec.useEventService() and taskParamMap.has_key('objectStore') and datasetSpec.type in ['output']:
datasetSpec.setObjectStore(taskParamMap['objectStore'])
# extract output filename template and change the value field
outFileTemplate,tmpItem['value'] = RefinerUtils.extractReplaceOutFileTemplate(tmpItem['value'],
datasetSpec.streamName)
# make output template
if outFileTemplate != None:
if tmpItem.has_key('offset'):
offsetVal = 1 + tmpItem['offset']
else:
offsetVal = 1
outTemplateMap = {'jediTaskID' : self.taskSpec.jediTaskID,
'serialNr' : offsetVal,
'streamName' : datasetSpec.streamName,
'filenameTemplate' : outFileTemplate,
'outtype' : datasetSpec.type,
}
if self.outputTemplateMap.has_key(datasetSpec.outputMapKey()):
# multiple files are associated to the same output datasets
self.outputTemplateMap[datasetSpec.outputMapKey()].append(outTemplateMap)
# don't insert the same output dataset
continue
self.outputTemplateMap[datasetSpec.outputMapKey()] = [outTemplateMap]
# append
self.outDatasetSpecList.append(datasetSpec)
# make unmerged dataset
if taskParamMap.has_key('mergeOutput') and taskParamMap['mergeOutput'] == True:
umDatasetSpec = JediDatasetSpec()
umDatasetSpec.datasetName = 'panda.um.' + datasetSpec.datasetName
umDatasetSpec.jediTaskID = self.taskSpec.jediTaskID
umDatasetSpec.storageToken = 'TOMERGE'
umDatasetSpec.vo = datasetSpec.vo
umDatasetSpec.type = "tmpl_trn_" + datasetSpec.type
umDatasetSpec.nFiles = 0
umDatasetSpec.nFilesUsed = 0
umDatasetSpec.nFilesToBeUsed = 0
umDatasetSpec.nFilesFinished = 0
umDatasetSpec.nFilesFailed = 0
umDatasetSpec.nFilesOnHold = 0
umDatasetSpec.status = 'defined'
umDatasetSpec.streamName = datasetSpec.streamName
if datasetSpec.isAllowedNoOutput():
umDatasetSpec.allowNoOutput()
# ratio
if datasetSpec.getRatioToMaster() > 1:
umDatasetSpec.setDatasetAttribute('ratio={0}'.format(datasetSpec.getRatioToMaster()))
# make unmerged output template
if outFileTemplate != None:
umOutTemplateMap = {'jediTaskID' : self.taskSpec.jediTaskID,
'serialNr' : 1,
'streamName' : umDatasetSpec.streamName,
'outtype' : datasetSpec.type,
}
# append temporary name
if taskParamMap.has_key('umNameAtEnd') and taskParamMap['umNameAtEnd'] == True:
# append temporary name at the end
umOutTemplateMap['filenameTemplate'] = outFileTemplate + '.panda.um'
else:
umOutTemplateMap['filenameTemplate'] = 'panda.um.' + outFileTemplate
if self.outputTemplateMap.has_key(umDatasetSpec.outputMapKey()):
# multiple files are associated to the same output datasets
self.outputTemplateMap[umDatasetSpec.outputMapKey()].append(umOutTemplateMap)
# don't insert the same output dataset
continue
self.outputTemplateMap[umDatasetSpec.outputMapKey()] = [umOutTemplateMap]
# use log as master for merging
if datasetSpec.type == 'log':
self.unmergeMasterDatasetSpec[datasetSpec.outputMapKey()] = umDatasetSpec
else:
# append
self.unmergeDatasetSpecMap[datasetSpec.outputMapKey()] = umDatasetSpec
# set attributes for merging
if taskParamMap.has_key('mergeOutput') and taskParamMap['mergeOutput'] == True:
self.setSplitRule(None,1,JediTaskSpec.splitRuleToken['mergeOutput'])
# make job parameters
rndmSeedOffset = None
firstEventOffset = None
jobParameters = ''
for tmpItem in taskParamMap['jobParameters']:
if tmpItem.has_key('value'):
# hidden parameter
if tmpItem.has_key('hidden') and tmpItem['hidden'] == True:
continue
# add tags for ES-only parameters
esOnly = False
if 'es_only' in tmpItem and tmpItem['es_only'] == True:
esOnly = True
if esOnly:
jobParameters += '<PANDA_ES_ONLY>'
jobParameters += '{0}'.format(tmpItem['value'])
if esOnly:
jobParameters += '</PANDA_ES_ONLY>'
# padding
if tmpItem.has_key('padding') and tmpItem['padding'] == False:
pass
else:
jobParameters += ' '
# get offset for random seed and first event
if tmpItem['type'] == 'template' and tmpItem['param_type'] == 'number':
if '${RNDMSEED}' in tmpItem['value']:
if tmpItem.has_key('offset'):
rndmSeedOffset = tmpItem['offset']
else:
rndmSeedOffset = 0
elif '${FIRSTEVENT}' in tmpItem['value']:
if tmpItem.has_key('offset'):
firstEventOffset = tmpItem['offset']
jobParameters = jobParameters[:-1]
# append parameters for event service merging if necessary
esmergeParams = self.getParamsForEventServiceMerging(taskParamMap)
if esmergeParams != None:
jobParameters += esmergeParams
self.setJobParamsTemplate(jobParameters)
# set random seed offset
if rndmSeedOffset != None:
self.setSplitRule(None,rndmSeedOffset,JediTaskSpec.splitRuleToken['randomSeed'])
if firstEventOffset != None:
self.setSplitRule(None,firstEventOffset,JediTaskSpec.splitRuleToken['firstEvent'])
# return
return
# replace placeholder with dict provided by prepro job
def replacePlaceHolders(self,paramItem,placeHolderName,newValue):
if isinstance(paramItem,types.DictType):
# loop over all dict params
for tmpParName,tmpParVal in paramItem.iteritems():
if tmpParVal == placeHolderName:
# replace placeholder
paramItem[tmpParName] = newValue
elif isinstance(tmpParVal,types.DictType) or \
isinstance(tmpParVal,types.ListType):
# recursive execution
self.replacePlaceHolders(tmpParVal,placeHolderName,newValue)
elif isinstance(paramItem,types.ListType):
# loop over all list items
for tmpItem in paramItem:
self.replacePlaceHolders(tmpItem,placeHolderName,newValue)
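    # Illustrative: replacePlaceHolders({'nEvents': '${NEVT}'}, '${NEVT}', 5000)
    # rewrites the dict in place to {'nEvents': 5000}, recursing into nested dicts
    # and lists; the key and placeholder names here are made up for the example.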
# refinement procedure for preprocessing
def doPreProRefine(self,taskParamMap):
# no preprocessing
if not taskParamMap.has_key('preproSpec'):
return None,taskParamMap
# already preprocessed
if self.taskSpec.checkPreProcessed():
# get replaced task params
tmpStat,tmpJsonStr = self.taskBufferIF.getPreprocessMetadata_JEDI(self.taskSpec.jediTaskID)
try:
# replace placeholders
replaceParams = RefinerUtils.decodeJSON(tmpJsonStr)
self.tmpLog.debug("replace placeholders with "+str(replaceParams))
for tmpKey,tmpVal in replaceParams.iteritems():
self.replacePlaceHolders(taskParamMap,tmpKey,tmpVal)
except:
errtype,errvalue = sys.exc_info()[:2]
self.tmpLog.error('{0} failed to get additional task params with {1}:{2}'.format(self.__class__.__name__,
errtype.__name__,errvalue))
return False,taskParamMap
# succeeded
self.updatedTaskParams = taskParamMap
return None,taskParamMap
# make dummy dataset to keep track of preprocessing
datasetSpec = JediDatasetSpec()
datasetSpec.datasetName = 'panda.pp.in.{0}.{1}'.format(uuid.uuid4(),self.taskSpec.jediTaskID)
datasetSpec.jediTaskID = self.taskSpec.jediTaskID
datasetSpec.type = 'pp_input'
datasetSpec.vo = self.taskSpec.vo
datasetSpec.nFiles = 1
datasetSpec.nFilesUsed = 0
datasetSpec.nFilesToBeUsed = 1
datasetSpec.nFilesFinished = 0
datasetSpec.nFilesFailed = 0
datasetSpec.nFilesOnHold = 0
datasetSpec.status = 'ready'
self.inMasterDatasetSpec.append(datasetSpec)
# make file
fileSpec = JediFileSpec()
fileSpec.jediTaskID = datasetSpec.jediTaskID
fileSpec.type = datasetSpec.type
fileSpec.status = 'ready'
fileSpec.lfn = 'pseudo_lfn'
fileSpec.attemptNr = 0
fileSpec.maxAttempt = 3
fileSpec.keepTrack = 1
datasetSpec.addFile(fileSpec)
# make log dataset
logDatasetSpec = JediDatasetSpec()
logDatasetSpec.datasetName = 'panda.pp.log.{0}.{1}'.format(uuid.uuid4(),self.taskSpec.jediTaskID)
logDatasetSpec.jediTaskID = self.taskSpec.jediTaskID
logDatasetSpec.type = 'tmpl_pp_log'
logDatasetSpec.streamName = 'PP_LOG'
logDatasetSpec.vo = self.taskSpec.vo
logDatasetSpec.nFiles = 0
logDatasetSpec.nFilesUsed = 0
logDatasetSpec.nFilesToBeUsed = 0
logDatasetSpec.nFilesFinished = 0
logDatasetSpec.nFilesFailed = 0
logDatasetSpec.nFilesOnHold = 0
logDatasetSpec.status = 'defined'
self.outDatasetSpecList.append(logDatasetSpec)
# make output template for log
outTemplateMap = {'jediTaskID' : self.taskSpec.jediTaskID,
'serialNr' : 1,
'streamName' : logDatasetSpec.streamName,
'filenameTemplate' : "{0}._${{SN}}.log.tgz".format(logDatasetSpec.datasetName),
'outtype' : re.sub('^tmpl_','',logDatasetSpec.type),
}
self.outputTemplateMap[logDatasetSpec.outputMapKey()] = [outTemplateMap]
# set split rule to use preprocessing
self.taskSpec.setPrePro()
# set task status
self.taskSpec.status = 'topreprocess'
# return
return True,taskParamMap
# set split rule
def setSplitRule(self,taskParamMap,keyName,valName):
if taskParamMap != None:
if not taskParamMap.has_key(keyName):
return
tmpStr = '{0}={1}'.format(valName,taskParamMap[keyName])
else:
tmpStr = '{0}={1}'.format(valName,keyName)
if self.taskSpec.splitRule in [None,'']:
self.taskSpec.splitRule = tmpStr
else:
tmpMatch = re.search(valName+'=(-*\d+)',self.taskSpec.splitRule)
if tmpMatch == None:
# append
self.taskSpec.splitRule += ',{0}'.format(tmpStr)
else:
# replace
self.taskSpec.splitRule = re.sub(valName+'=(-*\d+)',
tmpStr,
self.taskSpec.splitRule)
return
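    # Illustrative: repeated calls accumulate a comma-separated rule string such as
    # splitRule == 'NF=10,NE=1000' (token names here are placeholders for the real
    # JediTaskSpec.splitRuleToken values); an existing 'TOKEN=<int>' entry is
    # replaced in place rather than appended a second time.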
# get parameters for event service merging
def getParamsForEventServiceMerging(self,taskParamMap):
# no event service
if not self.taskSpec.useEventService():
return None
# extract parameters
transPath = 'UnDefined'
jobParameters = 'UnDefined'
if taskParamMap.has_key('esmergeSpec'):
if taskParamMap['esmergeSpec'].has_key('transPath'):
transPath = taskParamMap['esmergeSpec']['transPath']
if taskParamMap['esmergeSpec'].has_key('jobParameters'):
jobParameters = taskParamMap['esmergeSpec']['jobParameters']
# return
return '<PANDA_ESMERGE_TRF>'+transPath+'</PANDA_ESMERGE_TRF>'+'<PANDA_ESMERGE_JOBP>'+jobParameters+'</PANDA_ESMERGE_JOBP>'
Interaction.installSC(TaskRefinerBase)
| apache-2.0 |
xya/sms-tools | lectures/03-Fourier-properties/plots-code/shift.py | 26 | 1223 | import matplotlib.pyplot as plt
import numpy as np
import sys
from scipy.signal import sawtooth
sys.path.append('../../../software/models/')
import dftModel as DF
N = 128
x1 = sawtooth(2*np.pi*np.arange(-N/2,N/2)/float(N))
x2 = sawtooth(2*np.pi*np.arange(-N/2-2,N/2-2)/float(N))
mX1, pX1 = DF.dftAnal(x1, np.ones(N), N)
mX2, pX2 = DF.dftAnal(x2, np.ones(N), N)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(321)
plt.title('x1=x[n]')
plt.plot(np.arange(-N/2, N/2, 1.0), x1, lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(322)
plt.title('x2=x[n-2]')
plt.plot(np.arange(-N/2, N/2, 1.0), x2, lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(323)
plt.title('mX1')
plt.plot(np.arange(0, mX1.size, 1.0), mX1, 'r', lw=1.5)
plt.axis([0,mX1.size,min(mX1),max(mX1)])
plt.subplot(324)
plt.title('mX2')
plt.plot(np.arange(0, mX2.size, 1.0), mX2, 'r', lw=1.5)
plt.axis([0,mX2.size,min(mX2),max(mX2)])
plt.subplot(325)
plt.title('pX1')
plt.plot(np.arange(0, pX1.size, 1.0), pX1, 'c', lw=1.5)
plt.axis([0,pX1.size,min(pX1),max(pX2)])
plt.subplot(326)
plt.title('pX2')
plt.plot(np.arange(0, pX2.size, 1.0), pX2, 'c', lw=1.5)
plt.axis([0,pX2.size,min(pX2),max(pX2)])
plt.tight_layout()
plt.savefig('shift.png')
plt.show()
| agpl-3.0 |
johnboyington/homework | me777/hw3/hw3-p2.py | 1 | 3542 | '''
me777 hw3 problem 2, book problem 4-6
'''
from numpy.random import rand
import numpy as np
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
from time import time
class Wedge_Tail(object):
def __init__(self):
self.k = 20
self.delta = 0.5
self.i = np.array(range(1, self.k + 1))
self.x = np.array([0] + list(self.i * self.delta))
self.calc_areas()
def sample(self):
rho = rand()
self.j = 0
for j, a in enumerate(self.areas):
if rho > a:
self.j = j + 1
else:
break
# grab a new random number
self.rho = rand()
return float(self.cdfs())
def calc_areas(self):
A_i = self.delta * np.exp(-self.i * self.delta)
A_k_plus_i = np.exp(-self.i * self.delta)*(np.exp(self.delta) - 1 - self.delta)
A_2k_plus_i = np.exp(-self.k * self.delta)
areas = []
for A in A_i:
areas.append(A)
for A in A_k_plus_i:
areas.append(A)
for A in [A_2k_plus_i]:
areas.append(A)
        total_area = sum(areas)
        areas = np.array(areas) / total_area  # normalize piece areas to probabilities
        # cumulative distribution over the pieces, used for the table lookup in sample()
        self.areas = np.cumsum(areas)
def cdfs(self):
if self.j == len(self.areas):
self.x_L = max(self.x)
return fsolve(self.cdf3, 5)
elif self.j >= len(self.areas[:-1]) / 2:
self.x_L = self.x[self.j - len(self.x)]
return fsolve(self.cdf2, 2)
else:
self.x_L = self.x[self.j]
return self.rho * self.delta + self.x_L
def cdf1(self, x):
return ((1 / self.delta) * (x - self.x_L)) - self.rho
def cdf2(self, x):
A = np.exp(self.delta) - 1 - self.delta
T1 = -np.exp(-x + self.i * self.delta) - x
T2 = -np.exp(-self.x_L + self.i * self.delta) - self.x_L
return ((1/A) * (T1 - T2)) - self.rho
def cdf3(self, x):
T1 = -np.exp(-x + self.k * self.delta)
T2 = np.exp(-self.x_L + self.k * self.delta)
return (T1 + T2) - self.rho
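    # Illustrative summary of the piecewise inversion above: with delta=0.5 and k=20,
    # the density exp(-x) on [0, 10] is decomposed into 20 rectangles (cdf1),
    # 20 wedges (cdf2) and one exponential tail beyond x = k*delta = 10 (cdf3);
    # `j` picks the piece by table lookup and `rho` is inverted through that piece's CDF.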
def sample_icdf(self):
rho = rand()
return -np.log(rho)
WT = Wedge_Tail()
##########################################
# test
##########################################
if True:
xs = WT.x
scores = np.zeros(len(xs))
for i in range(100000):
s = WT.sample_icdf()
for j in range(len(xs)-1):
if s > xs[j] and s < xs[j+1]:
scores[j] += 1
scores = (scores / sum(scores)) * (1 / (xs[1] - xs[0]))
x_vals = np.linspace(0, 10, 100)
y_vals = np.exp(-x_vals)
#########################################
# timing
#########################################
N_samples = 100000
t_rwt = time()
for i in range(N_samples):
WT.sample()
t_rwt = time() - t_rwt
t_icdf = time()
for i in range(N_samples):
WT.sample_icdf()
t_icdf = time() - t_icdf
print('\n')
print('Rectangle-Wedge-Tail Method: {}'.format(t_rwt))
print('Inverse CDF Method: {}'.format(t_icdf))
print('Ratio: {}'.format(t_rwt / t_icdf))
#########################################
# plotting
#########################################
fig = plt.figure(0)
ax = fig.add_subplot(111)
ax.step(xs, scores, 'k', where='post', label='Sampling')
ax.plot(x_vals, y_vals, 'g-.', label='$e^{-x}$')
ax.set_xlabel('x')
ax.set_ylabel('p(x)')
ax.set_yscale('log')
ax.legend()
| gpl-3.0 |
olgabot/prettyplotlib | prettyplotlib/__init__.py | 1 | 1033 | #!/usr/bin/env python
from __future__ import absolute_import
import matplotlib as mpl
import brewer2mpl
from ._bar import bar
from ._barh import barh
from ._boxplot import boxplot
from ._beeswarm import beeswarm
from ._eventplot import eventplot
from ._hist import hist
from ._legend import legend
from ._plot import plot
from ._pcolormesh import pcolormesh
from ._scatter import scatter
from ._fill_between import fill_between
from ._fill_betweenx import fill_betweenx
from ._stackplot import stackplot
from .general import *
def scatter_column(ax, x, **kwargs):
"""
Creates a boxplot-like 'scatter column' which is like a boxplot, though
    it plots the values of every observation in x as individual points rather than
    summary statistics (not yet implemented).
"""
pass
def switch_axis_limits(ax, which_axis):
'''
Switch the axis limits of either x or y. Or both!
'''
for a in which_axis:
assert a in ('x', 'y')
ax_limits = ax.axis()
if a == 'x':
ax.set_xlim(ax_limits[1], ax_limits[0])
else:
ax.set_ylim(ax_limits[3], ax_limits[2])
| mit |
rvraghav93/scikit-learn | sklearn/neighbors/tests/test_lof.py | 26 | 4108 | # Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from math import sqrt
import numpy as np
from sklearn import neighbors
from numpy.testing import assert_array_equal
from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.datasets import load_iris
# load the iris dataset
# and randomly permute it
rng = check_random_state(0)
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_lof():
# Toy sample (the last two samples are outliers):
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [5, 3], [-4, 2]]
# Test LocalOutlierFactor:
clf = neighbors.LocalOutlierFactor(n_neighbors=5)
score = clf.fit(X).negative_outlier_factor_
assert_array_equal(clf._fit_X, X)
# Assert largest outlier score is smaller than smallest inlier score:
assert_greater(np.min(score[:-2]), np.max(score[-2:]))
# Assert predict() works:
clf = neighbors.LocalOutlierFactor(contamination=0.25,
n_neighbors=5).fit(X)
assert_array_equal(clf._predict(), 6 * [1] + 2 * [-1])
def test_lof_performance():
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
X_train = X[:100]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X[100:], X_outliers]
y_test = np.array([0] * 20 + [1] * 20)
# fit the model
clf = neighbors.LocalOutlierFactor().fit(X_train)
# predict scores (the lower, the more normal)
y_pred = -clf._decision_function(X_test)
# check that roc_auc is good
assert_greater(roc_auc_score(y_test, y_pred), .99)
def test_lof_values():
# toy samples:
X_train = [[1, 1], [1, 2], [2, 1]]
clf = neighbors.LocalOutlierFactor(n_neighbors=2).fit(X_train)
s_0 = 2. * sqrt(2.) / (1. + sqrt(2.))
s_1 = (1. + sqrt(2)) * (1. / (4. * sqrt(2.)) + 1. / (2. + 2. * sqrt(2)))
# check predict()
assert_array_almost_equal(-clf.negative_outlier_factor_, [s_0, s_1, s_1])
# check predict(one sample not in train)
assert_array_almost_equal(-clf._decision_function([[2., 2.]]), [s_0])
    # check predict(one sample already in train)
assert_array_almost_equal(-clf._decision_function([[1., 1.]]), [s_1])
def test_lof_precomputed(random_state=42):
"""Tests LOF with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
# As a feature matrix (n_samples by n_features)
lof_X = neighbors.LocalOutlierFactor(n_neighbors=3)
lof_X.fit(X)
pred_X_X = lof_X._predict()
pred_X_Y = lof_X._predict(Y)
# As a dense distance matrix (n_samples by n_samples)
lof_D = neighbors.LocalOutlierFactor(n_neighbors=3, algorithm='brute',
metric='precomputed')
lof_D.fit(DXX)
pred_D_X = lof_D._predict()
pred_D_Y = lof_D._predict(DYX)
assert_array_almost_equal(pred_X_X, pred_D_X)
assert_array_almost_equal(pred_X_Y, pred_D_Y)
def test_n_neighbors_attribute():
X = iris.data
clf = neighbors.LocalOutlierFactor(n_neighbors=500).fit(X)
assert_equal(clf.n_neighbors_, X.shape[0] - 1)
clf = neighbors.LocalOutlierFactor(n_neighbors=500)
assert_warns_message(UserWarning,
"n_neighbors will be set to (n_samples - 1)",
clf.fit, X)
assert_equal(clf.n_neighbors_, X.shape[0] - 1)
| bsd-3-clause |
kashif/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
buhe/mlhack | logRegres.py | 1 | 1533 | #encoding=utf-8
from numpy import *
def loadDataSet():
dataMat = []
labelMat = []
fr = open('testSet.txt')
for line in fr.readlines():
lineArr = line.strip().split()
dataMat.append([1.0,float(lineArr[0]),float(lineArr[1])])
labelMat.append(int(lineArr[2]))
return dataMat,labelMat
def sigmoid(inX):
return 1.0/(1+exp(-inX))
def gradAscent(dataMatIn,classLabels):# gradient ascent algorithm
    dataMatrix = mat(dataMatIn)# convert the input array to a NumPy matrix
    labelMat = mat(classLabels).transpose()# convert to a matrix, then transpose the row vector into a column vector
    m,n = shape(dataMatrix)# 100 rows, 3 columns
alpha = 0.001
maxCycles = 500
weights = ones((n,1))
for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights) # the result is a column vector
        error = (labelMat - h) # true labels minus predictions gives the error
weights = weights + alpha * dataMatrix.transpose() * error
return weights
def plotBestFit(wei):
import matplotlib.pyplot as plt
weights = wei.getA()
dataMat,labelMat = loadDataSet()
dataArr = array(dataMat)
n = shape(dataArr)[0]
xcord1 = []
ycord1 = []
xcord2 = []
ycord2 = []
for i in range(n):
if int(labelMat[i]) == 1:
xcord1.append(dataArr[i,1])
ycord1.append(dataArr[i,2])
else:
xcord2.append(dataArr[i,1])
            ycord2.append(dataArr[i,2])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xcord1,ycord1,s = 30,c = 'red',marker='s')
ax.scatter(xcord2,ycord2,s = 30,c = 'green')
x = arange(-3.0,3.0,0.1)
y = (-weights[0]-weights[1]*x)/weights[2]
ax.plot(x,y)
plt.xlabel('X1')
plt.ylabel('X2')
plt.show()
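# Illustrative usage sketch (not part of the original module): assumes a whitespace-
# separated 'testSet.txt' with two feature columns and a 0/1 label per line, as
# loadDataSet() above expects.
if __name__ == '__main__':
    dataArr, labelMat = loadDataSet()
    weights = gradAscent(dataArr, labelMat)
    plotBestFit(weights)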
| mit |
belltailjp/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
    # clear_data_home will delete both the content and the folder itself
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
deeplook/bokeh | bokeh/util/serialization.py | 31 | 7419 | """ Functions for helping with serialization and deserialization of
Bokeh objects.
"""
from __future__ import absolute_import
from six import iterkeys
is_numpy = None
try:
import numpy as np
is_numpy = True
except ImportError:
is_numpy = False
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
import logging
log = logging.getLogger(__name__)
_simple_id = 1000
def make_id():
""" Return a new unique ID for a Bokeh object.
Normally this function will return UUIDs to use for identifying Bokeh
    objects. This is especially important for Bokeh objects stored on a
Bokeh server. However, it is convenient to have more human-readable
IDs during development, so this behavior can be overridden by
setting the environment variable ``BOKEH_SIMPLE_IDS=yes``.
"""
global _simple_id
import uuid
from ..settings import settings
if settings.simple_ids(False):
_simple_id += 1
new_id = _simple_id
else:
new_id = uuid.uuid4()
return str(new_id)
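# Example (illustrative): with BOKEH_SIMPLE_IDS unset, make_id() returns a UUID string
# such as 'f3c9a2d4-...'; with BOKEH_SIMPLE_IDS=yes it returns consecutive, readable
# ids starting at '1001'.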
def urljoin(*args):
""" Construct an absolute URL from several URL components.
Args:
*args (str) : URL components to join
Returns:
str : joined URL
"""
from six.moves.urllib.parse import urljoin as sys_urljoin
from functools import reduce
return reduce(sys_urljoin, args)
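# Example (illustrative): urljoin("http://example.com/", "api/", "v1") reduces the
# components pairwise and returns 'http://example.com/api/v1'.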
def get_json(response):
""" Unify retrieving JSON responses from different sources.
Works correctly for HTTP responses from requests <=1.0, >1.0, and
the Flask test client.
Args:
response (Flask or requests response) : a response to process
Returns:
JSON
"""
import json
try:
import flask
except ImportError:
flask = None
if flask and isinstance(response, flask.Response):
# flask testing
return json.loads(response.data.decode('utf-8'))
else:
# requests
if hasattr(response.json, '__call__'):
return response.json()
else:
return response.json
def dump(objs, docid, changed_only=True):
""" Serialize a sequence of Bokeh objects into JSON
Args:
objs (seq[obj]) : a sequence of Bokeh object to dump
docid (str) : an ID for a Bokeh Document to dump relative to
changed_only (bool, optional) : whether to dump only attributes
that have had their values changed at some point (default: True)
Returns:
list[json]
"""
json_objs = []
for obj in objs:
ref = obj.ref
ref["attributes"] = obj.vm_serialize(changed_only=changed_only)
ref["attributes"].update({"id": ref["id"], "doc" : docid})
json_objs.append(ref)
return json_objs
def is_ref(frag):
""" Test whether a given Bokeh object graph fragment is a reference.
A Bokeh "reference" is a ``dict`` with ``"type"`` and ``"id"`` keys.
Args:
frag (dict) : a fragment of a Bokeh object graph
Returns:
True, if the fragment is a reference, otherwise False
"""
return isinstance(frag, dict) and \
frag.get('type') and \
frag.get('id')
def json_apply(fragment, check_func, func):
""" Apply a function to JSON fragments that match the given predicate
and return the collected results.
Recursively traverses a nested collection of ``dict`` and ``list``,
applying ``check_func`` to each fragment. If True, then collect
``func(fragment)`` in the final output
Args:
fragment (JSON-like) : the fragment to apply ``func`` to recursively
check_func (callable) : the predicate to test fragments with
func (callable) : the conversion function to apply
Returns:
converted fragments
"""
if check_func(fragment):
return func(fragment)
elif isinstance(fragment, list):
output = []
for val in fragment:
output.append(json_apply(val, check_func, func))
return output
elif isinstance(fragment, dict):
output = {}
for k, val in fragment.items():
output[k] = json_apply(val, check_func, func)
return output
else:
return fragment
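# Example (illustrative sketch): collapse every reference fragment to its id.
#
#   frag = [{"type": "Plot", "id": "10"}, [{"type": "Line", "id": "11"}], 42]
#   json_apply(frag, is_ref, lambda ref: ref["id"])  # -> ["10", ["11"], 42]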
def transform_series(obj):
"""transforms pandas series into array of values
"""
vals = obj.values
return transform_array(vals)
def transform_array(obj):
"""Transform arrays into lists of json safe types
also handles pandas series, and replacing
nans and infs with strings
"""
# Check for astype failures (putative Numpy < 1.7)
dt2001 = np.datetime64('2001')
legacy_datetime64 = (dt2001.astype('int64') ==
dt2001.astype('datetime64[ms]').astype('int64'))
## not quite correct, truncates to ms..
if obj.dtype.kind == 'M':
if legacy_datetime64:
if obj.dtype == np.dtype('datetime64[ns]'):
return (obj.astype('int64') / 10**6.0).tolist()
else:
return (obj.astype('datetime64[us]').astype('int64') / 1000.).tolist()
elif obj.dtype.kind in ('u', 'i', 'f'):
return transform_numerical_array(obj)
return obj.tolist()
def transform_numerical_array(obj):
"""handles nans/inf conversion
"""
if isinstance(obj, np.ma.MaskedArray):
obj = obj.filled(np.nan) # Set masked values to nan
if not np.isnan(obj).any() and not np.isinf(obj).any():
return obj.tolist()
else:
transformed = obj.astype('object')
transformed[np.isnan(obj)] = 'NaN'
transformed[np.isposinf(obj)] = 'Infinity'
transformed[np.isneginf(obj)] = '-Infinity'
return transformed.tolist()
def traverse_data(datum, is_numpy=is_numpy, use_numpy=True):
"""recursively dig until a flat list is found
if numpy is available convert the flat list to a numpy array
and send off to transform_array() to handle nan, inf, -inf
otherwise iterate through items in array converting non-json items
Args:
datum (list) : a list of values or lists
is_numpy: True if numpy is present (see imports)
use_numpy: toggle numpy as a dependency for testing purposes
"""
is_numpy = is_numpy and use_numpy
if is_numpy and not any(isinstance(el, (list, tuple)) for el in datum):
return transform_array(np.asarray(datum))
datum_copy = []
for item in datum:
if isinstance(item, (list, tuple)):
datum_copy.append(traverse_data(item))
elif isinstance(item, float):
if np.isnan(item):
item = 'NaN'
elif np.isposinf(item):
item = 'Infinity'
elif np.isneginf(item):
item = '-Infinity'
datum_copy.append(item)
else:
datum_copy.append(item)
return datum_copy
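# Example (illustrative): nested lists are handled recursively and non-finite floats
# are replaced with JSON-safe strings, e.g.
#   traverse_data([[1.0, float('nan')], [float('inf'), 2.0]])
#   # -> [[1.0, 'NaN'], ['Infinity', 2.0]]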
def transform_column_source_data(data):
"""iterate through the data of a ColumnSourceData object replacing
non-JSON-compliant objects with compliant ones
"""
data_copy = {}
for key in iterkeys(data):
if is_pandas and isinstance(data[key], (pd.Series, pd.Index)):
data_copy[key] = transform_series(data[key])
elif isinstance(data[key], np.ndarray):
data_copy[key] = transform_array(data[key])
else:
data_copy[key] = traverse_data(data[key])
return data_copy
| bsd-3-clause |
hrabcak/jsbsim | tests/CheckMomentsUpdate.py | 1 | 3692 | # CheckMomentsUpdate.py
#
# Regression test to check the moments are computed according to the last
# update of the CG location (issue reported by Marta Marimon)
#
# Copyright (c) 2015 Bertrand Coconnier
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>
#
import pandas as pd
from JSBSim_utils import JSBSimTestCase, CreateFDM, ExecuteUntil, RunTest
mol2lbs = 0.00013841 * 32.174049
class CheckMomentsUpdate(JSBSimTestCase):
def CheckCGPosition(self):
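        # The CG should match the radiosonde point-mass location weighted by its share
        # of the total weight (the remaining mass is assumed to sit at the structural origin).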
weight = self.fdm['inertia/weight-lbs']
empty_weight = self.fdm['inertia/empty-weight-lbs']
contents = self.fdm['buoyant_forces/gas-cell/contents-mol']
radiosonde_weight = weight - empty_weight - contents * mol2lbs
CGx = self.fdm['inertia/cg-x-in']
CGy = self.fdm['inertia/cg-y-in']
CGz = self.fdm['inertia/cg-z-in']
X = self.fdm['inertia/pointmass-location-X-inches']
Y = self.fdm['inertia/pointmass-location-Y-inches']
Z = self.fdm['inertia/pointmass-location-Z-inches']
self.assertAlmostEqual(CGx, X * radiosonde_weight / weight, delta=1E-7)
self.assertAlmostEqual(CGy, Y * radiosonde_weight / weight, delta=1E-7)
self.assertAlmostEqual(CGz, Z * radiosonde_weight / weight, delta=1E-7)
def test_moments_update(self):
script_path = self.sandbox.path_to_jsbsim_file('scripts',
'weather-balloon.xml')
self.fdm = CreateFDM(self.sandbox)
self.fdm.load_script(script_path)
self.fdm.set_output_directive(self.sandbox.path_to_jsbsim_file('tests', 'output.xml'))
self.fdm.run_ic()
self.CheckCGPosition()
dt = self.fdm['simulation/dt']
ExecuteUntil(self.fdm, 1.0-2.0*dt)
self.CheckCGPosition()
# Moves the radio sonde to modify the CG location
self.fdm['inertia/pointmass-location-X-inches'] = 5.0
# Check that the moment is immediately updated accordingly
self.fdm.run()
self.CheckCGPosition()
Fbx = self.fdm['forces/fbx-buoyancy-lbs']
Fbz = self.fdm['forces/fbz-buoyancy-lbs']
CGx = self.fdm['inertia/cg-x-in'] / 12.0 # Converts from in to ft
CGz = self.fdm['inertia/cg-z-in'] / 12.0
Mby = self.fdm['moments/m-buoyancy-lbsft']
self.assertAlmostEqual(Fbx * CGz - Fbz * CGx, Mby, delta=1E-7,
msg="Fbx*CGz-Fbz*CGx = %f and Mby = %f do not match" % (Fbx*CGz-Fbz*CGx, Mby))
# One further step to log the same results in the output file
self.fdm.run()
self.CheckCGPosition()
csv = pd.read_csv('output.csv')
Mby = csv['M_{Buoyant} (ft-lbs)'].iget(-1)
Fbx = csv['F_{Buoyant x} (lbs)'].iget(-1)
Fbz = csv['F_{Buoyant z} (lbs)'].iget(-1)
self.assertAlmostEqual(Fbx * CGz - Fbz * CGx, Mby, delta=1E-7,
msg="Fbx*CGz-Fbz*CGx = %f and Mby = %f do not match" % (Fbx*CGz-Fbz*CGx, Mby))
RunTest(CheckMomentsUpdate)
| lgpl-2.1 |
abimannans/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
jchrismer/PiQuad | Calibration/Main_Gyro_Accel_Calib.py | 1 | 4337 | """
Inertial sensors calibration
------------------------
Library containing functions which perform ellipsoid fitting on given input.
History:
V1.0 - 10/12/16 (initial release)
To Do:
Add command line input and parsing
references:
    Tedaldi, David, Alberto Pretto, and Emanuele Menegatti. "A robust and easy to implement method for IMU calibration
    without external equipments." 2014 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2014.
Contact:
[email protected]
Project blog:
http://aerialarithmetic.blogspot.com/
"""
import Inertial_Calibration as IC
import numpy as np
import csv
import statistics
from lmfit import minimize, Parameters
import matplotlib.pyplot as plt
import EllipsoidFit
def getData(filename):
with open(filename, 'rt') as csvfile:
csvReader = csv.reader(csvfile, delimiter=',')
x=list(csvReader)
Data =np.array(x).astype('float')
return Data
# csv files of recorded sensor data
Accel = getData('/home/joseph/Desktop/Project/Matlab/Calibration/Finished_code/Calibration/Accel_demo.csv')
Gyro = getData('/home/joseph/Desktop/Project/Matlab/Calibration/Finished_code/Calibration/Gyro_demo.csv')
options = [50,1]
Sample_length = len(Accel)
Time = [1.0/100.0] * Sample_length
T = Accel[:,0]
Accel = Accel[:,1:4]
Gyro = Gyro[:,1:4]
i = 1
Valid_intervals_starts, Valid_intervals_ends,visualize,init_staitc = IC.static_invertal_detection(Gyro,Time,options,3.5)
# Pull out static intervals and find their mean
num_static = len(Valid_intervals_starts)
Static_accel = np.zeros((3,num_static))
# an lls fit (theoretically) needs at least 9 points to fit an ellipsoid. More is better though
if(num_static < 9):
print("Variance threshold multiplier: %d could not generate minimal required static intervals - SKIPPING" %i)
for i in range(0,num_static):
ax = statistics.mean(Accel[Valid_intervals_starts[i]:(Valid_intervals_ends[i]+1),0])
ay = statistics.mean(Accel[Valid_intervals_starts[i]:(Valid_intervals_ends[i]+1),1])
az = statistics.mean(Accel[Valid_intervals_starts[i]:(Valid_intervals_ends[i]+1),2])
Static_accel[:,i] = [ax,ay,az]
# Construct initial estimates for theta_accel using LLS fit on the static intervals
plt.plot(Accel[:,0])
plt.hold(True)
plt.plot(Accel[:,1],'r')
plt.plot(Accel[:,2],'g')
plt.plot(visualize,'k')
plt.show()
center,A_inv,radii = EllipsoidFit.lls_fit(Static_accel.transpose())
Grav = 9.81744
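# Normalize the fitted ellipsoid radii by gravity so that the accelerometer scale
# factors below (1/radius) are expressed relative to 1 g.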
radii = [radii[0]/Grav,radii[1]/Grav,radii[2]/Grav]
# TODO: Implement LVM fitting for accelerometer and gyroscope
theta_acc = Parameters()
theta_acc.add('ax', value=0)
theta_acc.add('ay', value=0)
theta_acc.add('az', value=0)
theta_acc.add('kx', value=1.0/radii[0])
theta_acc.add('ky', value=1.0/radii[1])
theta_acc.add('kz', value=1.0/radii[2])
theta_acc.add('bx', value=center[0])
theta_acc.add('by', value=center[1])
theta_acc.add('bz', value=center[2])
out = minimize(IC.accel_resid, theta_acc, args=Static_accel)
#cost = out.chisqr
''' Gyroscope calibration '''
static_interval_count = len(Valid_intervals_starts)
# Create storage list
Gyro_movements = (static_interval_count-1) * [[[]]]
for i in range(0,static_interval_count-1):
gyro_start = Valid_intervals_ends[i]+1
gyro_ends = Valid_intervals_starts[i+1]-1
Gyro_movements[i] = Gyro[gyro_start:(gyro_ends),:]
#plt.plot(Gyro[:,0])
#plt.hold(True)
#plt.plot(Gyro[:,1],'r')
#plt.plot(Gyro[:,2],'g')
#plt.plot(visualize,'k')
#plt.show()
# Set up gyroscope parameters
theta_gyro = Parameters()
theta_gyro.add('gamma_yz', value=0)
theta_gyro.add('gamma_zy', value=0)
theta_gyro.add('gamma_xz', value=0)
theta_gyro.add('gamma_zx', value=0)
theta_gyro.add('gamma_xy', value=0)
theta_gyro.add('gamma_yx', value=0)
theta_gyro.add('sx', value=1.0/6258.0)
theta_gyro.add('sy', value=1.0/6258.0)
theta_gyro.add('sz', value=1.0/6258.0)
# Get gyroscope bias from the initial static interval
gbx = statistics.mean(Gyro[0:init_staitc,0])
gby = statistics.mean(Gyro[0:init_staitc,1])
gbz = statistics.mean(Gyro[0:init_staitc,2])
Gyro_intervals = IC.GyroData(Gyro_movements,gbx,gby,gbz)
Accel_intervals = IC.AccelData(Static_accel)
Accel_intervals.Accel = Accel_intervals.applyCalib(out.params,Static_accel)
out_gyro = minimize(IC.gyro_resid, theta_gyro, args=(Gyro_intervals,Accel_intervals,Time))
print(out_gyro.params)
print(out.params) | gpl-3.0 |
hainm/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
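    # piecewise form (z = y_true * y_pred): -4*z for z < -1,
    # (1 - z)**2 for -1 <= z < 1, and 0 for z >= 1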
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
cl4rke/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
auDeep/auDeep | audeep/backend/parsers/urban_sound_8k.py | 1 | 7457 | # Copyright (C) 2017-2018 Michael Freitag, Shahin Amiriparian, Sergey Pugachevskiy, Nicholas Cummins, Björn Schuller
#
# This file is part of auDeep.
#
# auDeep is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# auDeep is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with auDeep. If not, see <http://www.gnu.org/licenses/>.
"""A parser for the UrbanSound8K data set"""
from pathlib import Path
from typing import Optional, Mapping, Sequence
import pandas as pd
from audeep.backend.data.data_set import Split
from audeep.backend.log import LoggingMixin
from audeep.backend.parsers.base import Parser, _InstanceMetadata
class _ColumnNames:
"""
Names of the metadata columns in the UrbanSound8K metadata CSV file.
"""
FILENAME = "slice_file_name"
CV_FOLD = "fold"
LABEL_NUMERIC = "classID"
LABEL_NOMINAL = "class"
class UrbanSound8KParser(LoggingMixin, Parser):
"""
Parser for the UrbanSound8K data set.
"""
def __init__(self, basedir: Path):
"""
Creates and initializes a new UrbanSound8KParser for the specified data set base directory.
Parameters
----------
basedir: pathlib.Path
The data set base directory
"""
super().__init__(basedir)
self._metadata_cache = None
self._label_map_cache = None
def _metadata(self) -> pd.DataFrame:
"""
Reads the metadata/UrbanSound8K.csv metadata file.
The file is read once and cached.
Returns
-------
pandas.DataFrame
The contents of the metadata/UrbanSound8K.csv metadata file as a pandas DataFrame
Raises
------
IOError
If the data set cannot be parsed
"""
if not self.can_parse():
raise IOError("unable to parse UrbanSound8K dataset at {}".format(self._basedir))
if self._metadata_cache is None:
self._metadata_cache = pd.read_csv(self._basedir / "metadata" / "UrbanSound8K.csv")
# noinspection PyTypeChecker
return self._metadata_cache
def can_parse(self) -> bool:
"""
Checks whether the data set base directory contains the UrbanSound8K data set.
Currently, this method checks whether the metadata/UrbanSound8K.csv file exists, and whether the audio directory
exists within the data set base directory.
Returns
-------
bool
True, if the data set base directory contains the UrbanSound8K data set, False otherwise
"""
audio_dir = self._basedir / "audio"
metadata_file = self._basedir / "metadata" / "UrbanSound8K.csv"
if not audio_dir.exists():
self.log.debug("cannot parse: audio directory at %s missing", audio_dir)
return False
if not metadata_file.exists():
self.log.debug("cannot parse: metadata file at %s missing", metadata_file)
return False
return True
@property
def num_instances(self) -> int:
"""
Returns the number of instances in the data set.
Returns
-------
int
The number of instances in the data set
Raises
------
IOError
If the data set cannot be parsed
"""
if not self.can_parse():
raise IOError("Unable to parse UrbanSound8K data set at {}".format(self._basedir))
return len(self._metadata())
@property
def num_folds(self) -> int:
"""
Returns the number of cross-validation folds, which is ten for this parser.
Returns
-------
int
ten
Raises
------
IOError
If the data set cannot be parsed
"""
if not self.can_parse():
raise IOError("Unable to parse UrbanSound8K data set at {}".format(self._basedir))
return 10
@property
def label_map(self) -> Optional[Mapping[str, int]]:
"""
Returns the mapping of nominal to numeric labels.
The UrbanSound8K specifies a custom mapping, which is returned by this method.
Returns
-------
map of str to int
The mapping of nominal to numeric labels
Raises
------
IOError
If the data set cannot be parsed
"""
if not self.can_parse():
raise IOError("Unable to parse UrbanSound8K data set at {}".format(self._basedir))
if self._label_map_cache is None:
self._label_map_cache = {}
for _, row in self._metadata().iterrows():
label_nominal = row.loc[_ColumnNames.LABEL_NOMINAL]
label_numeric = row.loc[_ColumnNames.LABEL_NUMERIC]
if label_nominal not in self._label_map_cache:
self._label_map_cache[label_nominal] = label_numeric
elif self._label_map_cache[label_nominal] != label_numeric:
                    raise IOError("inconsistent labels: %s has numeric values %d and %d" %
                                  (label_nominal, label_numeric,
                                   self._label_map_cache[label_nominal]))
return self._label_map_cache
def parse(self) -> Sequence[_InstanceMetadata]:
"""
Parses the instances contained in this data set.
For each instance, metadata is computed and stored in an _InstanceMetadata object. Instances are parsed in the
order in which they appear in the metadata/UrbanSound8K.csv file.
Returns
-------
list of _InstanceMetadata
A list of _InstanceMetadata containing one entry for each parsed audio file
Raises
------
IOError
If the data set cannot be parsed
"""
if not self.can_parse():
raise IOError("Unable to parse UrbanSound8K data set at {}".format(self._basedir))
meta_list = []
for _, row in self._metadata().iterrows():
filename = row.loc[_ColumnNames.FILENAME]
path = self._basedir / "audio" / ("fold%d" % row.loc[_ColumnNames.CV_FOLD]) / filename
cv_folds = [Split.TRAIN] * 10
cv_folds[row.loc[_ColumnNames.CV_FOLD] - 1] = Split.VALID
label_nominal = row.loc[_ColumnNames.LABEL_NOMINAL]
instance_metadata = _InstanceMetadata(path=path,
filename=filename,
label_nominal=label_nominal,
label_numeric=None,
cv_folds=cv_folds,
partition=None)
self.log.debug("parsed instance %s: label = %s", filename, label_nominal)
meta_list.append(instance_metadata)
return meta_list
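# Illustrative usage sketch (not part of the original module): assumes the UrbanSound8K
# archive is extracted so that <basedir>/audio and <basedir>/metadata/UrbanSound8K.csv exist.
#
#   parser = UrbanSound8KParser(Path("/data/UrbanSound8K"))
#   if parser.can_parse():
#       instances = parser.parse()  # one _InstanceMetadata per audio file
#       print(parser.num_instances, parser.num_folds, parser.label_map)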
| gpl-3.0 |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/plotting/test_groupby.py | 6 | 2419 | # coding: utf-8
""" Test cases for GroupBy.plot """
from pandas import Series, DataFrame
import pandas.util.testing as tm
import numpy as np
from pandas.tests.plotting.common import TestPlotBase
tm._skip_module_if_no_mpl()
class TestDataFrameGroupByPlots(TestPlotBase):
def test_series_groupby_plotting_nominally_works(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender = np.random.choice(['male', 'female'], size=n)
weight.groupby(gender).plot()
tm.close()
height.groupby(gender).hist()
tm.close()
# Regression test for GH8733
height.groupby(gender).plot(alpha=0.5)
tm.close()
def test_plotting_with_float_index_works(self):
# GH 7025
df = DataFrame({'def': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'val': np.random.randn(9)},
index=[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0])
df.groupby('def')['val'].plot()
tm.close()
df.groupby('def')['val'].apply(lambda x: x.plot())
tm.close()
def test_hist_single_row(self):
# GH10214
bins = np.arange(80, 100 + 2, 1)
df = DataFrame({"Name": ["AAA", "BBB"],
"ByCol": [1, 2],
"Mark": [85, 89]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
def test_plot_submethod_works(self):
df = DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1, 2, 3, 2, 1],
'z': list('ababa')})
df.groupby('z').plot.scatter('x', 'y')
tm.close()
df.groupby('z')['x'].plot.line()
tm.close()
def test_plot_kwargs(self):
df = DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1, 2, 3, 2, 1],
'z': list('ababa')})
res = df.groupby('z').plot(kind='scatter', x='x', y='y')
# check that a scatter plot is effectively plotted: the axes should
# contain a PathCollection from the scatter plot (GH11805)
assert len(res['a'].collections) == 1
res = df.groupby('z').plot.scatter(x='x', y='y')
assert len(res['a'].collections) == 1
| mit |
marcocaccin/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
    # Counting the number of combinations is not as good as counting the
    # number of permutations. However, it works with a sampling algorithm
    # that does not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
MPC-Berkeley/barc | workspace/src/barc/src/control/pid.py | 2 | 2859 | #!/usr/bin/env python
# ---------------------------------------------------------------------------
# Licensing Information: You are free to use or extend these projects for
# education or reserach purposes provided that (1) you retain this notice
# and (2) you provide clear attribution to UC Berkeley, including a link
# to http://barc-project.com
#
# Attribution Information: The barc project ROS code-base was developed
# at UC Berkeley in the Model Predictive Control (MPC) lab by Jon Gonzales
# ([email protected]). The cloud services integration with ROS was developed
# by Kiet Lam ([email protected]). The web-server app Dator was
# based on an open source project by Bruce Wootton
# ---------------------------------------------------------------------------
class PID:
def __init__(self, P=2.0, I=0.0, D=0, de=0, e_int = 0, Integrator_max=500, Integrator_min=-500):
self.Kp = P # proportional gain
self.Ki = I # integral gain
self.Kd = D # derivative gain
self.set_point = 0 # reference point
self.e = 0
self.e_int = 0
self.int_e_max = Integrator_max
self.int_e_min = Integrator_min
self.current_value = 0
def update(self,current_value, dt):
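        # discrete PID law: u = Kp*e + Ki*sum(e*dt) + Kd*de/dt, with the integral
        # term clamped to [int_e_min, int_e_max] to limit windup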
self.current_value = current_value
e_t = self.set_point - current_value
de_t = ( e_t - self.e)/dt
self.e_int = self.e_int + e_t * dt
if self.e_int > self.int_e_max:
self.e_int = self.int_e_max
elif self.e_int < self.int_e_min:
self.e_int = self.int_e_min
P_val = self.Kp * e_t
I_val = self.Ki * self.e_int
D_val = self.Kd * de_t
PID = P_val + I_val + D_val
self.e = e_t
return PID
def setPoint(self,set_point):
self.set_point = set_point
# reset error integrator
self.e_int = 0
# reset error, otherwise de/dt will skyrocket
self.e = set_point - self.current_value
def setKp(self,P):
self.Kp=P
def setKi(self,I):
self.Ki=I
def setKd(self,D):
self.Kd=D
def getPoint(self):
return self.set_point
def getError(self):
return self.e
#%% Example function
def fx(x, u, dt):
x_next = x + (3*x + u) * dt
return x_next
#%% Test script to ensure program is functioning properly
if __name__ == "__main__":
from numpy import zeros
import matplotlib.pyplot as plt
n = 200
x = zeros(n)
x[0] = 20
pid = PID(P=3.7, I=5, D=0.5)
dt = 0.1
for i in range(n-1):
u = pid.update(x[i], dt)
x[i+1] = fx(x[i],u, dt)
plt.plot(x)
plt.show()
| mit |
seismology/mc_kernel | tests/plot_gabor_filter_results.py | 1 | 1704 |
# coding: utf-8
# In[1]:
import filtering
import numpy as np
import matplotlib.pyplot as plt
# Load input time trace
signal_in_temp = np.loadtxt('../tests/gaborinput.txt')
len_signal = 1000
dt = 0.1
t = np.array(range(0, len_signal)) * dt
signal_in = np.zeros((len_signal, 1))
signal_in[:, 0] = signal_in_temp
# Define filter bank
pmax = (2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0)
tshift = (0.0, 2.5, 0.0, 2.5, 0.0, 2.5, 0.0, 5.0, 0.0, 5.0, 0.0, 5.0)
sigmaIfc = (0.3, 0.3, 0.5, 0.5, 0.7, 0.7, 0.3, 0.3, 0.5, 0.5, 0.7, 0.7)
fig = plt.figure(figsize=(15, 10))
# Load MC Kernel results for each filter and plot
for ifilt in range(0, len(pmax)):
ax = fig.add_subplot(4, 3, ifilt + 1)
signal_out_ref = filtering.gaborfilter(signal_in, dt=dt,
pmax=pmax[ifilt],
nscale=1, fmult=2,
sigmaIfc=sigmaIfc[ifilt],
npad=2048,
tshift=(tshift[ifilt], 0))
# Load MC Kernel results
fnam = '../tests/gaboroutput_%02d.txt' % (ifilt+1)
signal_out_kernel = np.loadtxt(fnam)
diff = (signal_out_kernel[0:len_signal] -
signal_out_ref[0][0:len_signal, 0, 0])
# Plot Reference, MC Kernel and Difference * 1e6
ax.plot(t, signal_out_ref[0][0:len_signal, 0, 0], 'r', label='PyFFProc')
    ax.plot(t, signal_out_kernel[0:len_signal], 'k', label='MC Kernel')
ax.plot(t, diff*1e6, 'b', label='Difference x $10^6$')
ax.set_xlim((45, 55))
if (ifilt == 2):
ax.legend()
fig.savefig('Gaborfilter_compare_test_reference.png', dpi=200)
| gpl-3.0 |
martin-fabbri/reinforcement-learning-playground | rl-hello-world/rl-hello-world.py | 1 | 2905 | """
Simple Reinforcement Learning agent resolving a deterministic problem
"""
import numpy as np
import pandas as pd
import time
np.random.seed(2) # reproducible results
# Environment config
N_STATES = 6 # length of the one-dimensional world
ACTIONS = ['left', 'right'] # available actions
FRESH_TIME = 0.3
# Hyperparameters
EPSILON = 0.9 # greedy policy
ALPHA = 0.1 # learning rate
GAMMA = 0.9 # discount rate
MAX_EPISODES = 10 # maximum number of episodes
def create_q_table(n_states, actions):
table = pd.DataFrame(
np.zeros((n_states, len(actions))),
columns=actions
)
return table
def render(s, episode, step_counter):
env_list = ['-'] * (N_STATES -1) + ['T']
if s == "terminal":
interaction = 'Episode %s: total_steps = %s' % (
episode + 1, step_counter)
print('\r{}'.format(interaction), end='')
time.sleep(2)
print('\r ', end='')
else:
env_list[s] = "o"
interaction = "".join(env_list)
print(f"\r{step_counter} {interaction}")
time.sleep(FRESH_TIME)
def reset():
return 0
def draw():
return np.random.choice(ACTIONS)
def choose_action(s, q_table):
state_actions = q_table.iloc[s, :]
if np.random.uniform() > EPSILON or state_actions.all() == 0:
        action_name = draw() # act non-greedily, or when this state's actions have no value yet
else:
action_name = state_actions.argmax() # act greedy
return action_name
def step(action, state):
"""Run one timestep of the environment's dynamic
:param action: an action provided by the environment
:return: (observation, reward, done)
"""
observation = state
reward = 0
done = False
if action == "right":
if state == N_STATES - 2:
observation = "terminal"
reward = 1
done = True
else:
observation = state + 1
else:
# move left
if state != 0:
observation = state - 1
return observation, reward, done
def learn():
q = create_q_table(N_STATES, ACTIONS)
for episode in range(MAX_EPISODES):
step_counter = 0
state = reset()
done = False
render(state, episode, step_counter)
while not done:
action = choose_action(state, q)
observation, reward, done = step(action, state)
if observation != "terminal":
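                # Q-learning target: r + GAMMA * max_a' Q(s', a')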
q_target = reward + GAMMA * q.iloc[observation, :].max()
else:
q_target = reward # next state is terminal
q_predict = q.ix[state, action]
q.ix[state, action] += ALPHA * (q_target - q_predict)
state = observation
render(state, episode, step_counter)
step_counter += 1
return q
if __name__ == "__main__":
q = learn()
print('\r\nQ-table:\n')
print(q)
| apache-2.0 |
evanbiederstedt/RRBSfun | trees/chrom_scripts/cll_chr15.py | 1 | 8247 | import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)  # display up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(cw154))
print(len(trito))
totalfiles = cw154 + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr15"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ['RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GACACG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC',
'RRBS_trito_pool_2_CGTACTAG.ACAACC',
'RRBS_trito_pool_2_CGTACTAG.ACGTGG',
'RRBS_trito_pool_2_CGTACTAG.ACTCAC',
'RRBS_trito_pool_2_CGTACTAG.AGGATG',
'RRBS_trito_pool_2_CGTACTAG.ATAGCG',
'RRBS_trito_pool_2_CGTACTAG.ATCGAC',
'RRBS_trito_pool_2_CGTACTAG.CAAGAG',
'RRBS_trito_pool_2_CGTACTAG.CATGAC',
'RRBS_trito_pool_2_CGTACTAG.CCTTCG',
'RRBS_trito_pool_2_CGTACTAG.CGGTAG',
'RRBS_trito_pool_2_CGTACTAG.CTATTG',
'RRBS_trito_pool_2_CGTACTAG.GACACG',
'RRBS_trito_pool_2_CGTACTAG.GCATTC',
'RRBS_trito_pool_2_CGTACTAG.GCTGCC',
'RRBS_trito_pool_2_CGTACTAG.GGCATC',
'RRBS_trito_pool_2_CGTACTAG.GTGAGG',
'RRBS_trito_pool_2_CGTACTAG.GTTGAG',
'RRBS_trito_pool_2_CGTACTAG.TAGCGG',
'RRBS_trito_pool_2_CGTACTAG.TATCTC',
'RRBS_trito_pool_2_CGTACTAG.TCTCTG',
'RRBS_trito_pool_2_CGTACTAG.TGACAG']
print(total_matrix.shape)
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("cll_chr15.phy", header=None, index=None)
print(tott.shape)
| mit |
cheind/rgbd-correction | sensor_correction/apps/correct_depth.py | 1 | 3179 | __author__ = 'Christoph Heindl'
__copyright__ = 'Copyright 2017, Profactor GmbH'
__license__ = 'BSD'
import glob
import os
import cv2
import numpy as np
import re
import matplotlib.pyplot as plt
import matplotlib
import time
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sensor_correction.gp_cpu import GPRegressor
from sensor_correction.utils import sensor_unproject, create_batches
def crop(img, border):
return img[border[1]:-border[1], border[0]:-border[0]]
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Evaluate Gaussian Process')
parser.add_argument('regressor', type=str, help='Trained GP')
parser.add_argument('depth', type=str, help='Preprocessed depth')
parser.add_argument('intrinsics', type=str, help='Camera intrinsics')
parser.add_argument('--output', type=str, help='Result file', default='corrected_depths.npz')
parser.add_argument('--gpu', action='store_true', help='Use GPU')
args = parser.parse_args()
matplotlib.rcParams.update({'font.size': 20})
# Load depth data
data = np.load(args.depth)
temps = data['temps']
poses = data['poses']
all_depths_ir = data['depth_ir'][()]
all_depths_rgb = data['depth_rgb'][()]
h, w = all_depths_ir[(poses[0], temps[0])].shape
x = np.arange(0, w, 1)
y = np.arange(0, h, 1)
xx, yy = np.meshgrid(x, y)
xy = np.hstack((xx.reshape(-1,1), yy.reshape(-1,1)))
# Load regressor
r = GPRegressor()
r.load(args.regressor)
if args.gpu:
import tensorflow as tf
from sensor_correction.gp_gpu import GPRegressorGPU
sess = tf.Session()
xfeed = tf.placeholder(dtype=tf.float32, shape=[16384 ,4])
r = GPRegressorGPU(r, xfeed)
# Load intrinsics
K = np.loadtxt(args.intrinsics).reshape(3,3)
Kinv = np.linalg.inv(K)
all_depths = {}
all_deltae = {}
total_time = 0.
total_count = 0
for p in poses:
for t in temps:
print('Processing pos {}, temperature {}'.format(p, t))
depth_ir = all_depths_ir[(p, t)] # Actual
start_time = time.time()
xyz = sensor_unproject(xy, depth_ir.ravel(), Kinv)
xyzt = np.column_stack((xyz, np.ones(xyz.shape[0])*t))
batches = create_batches(xyzt, 16384, pad=True)
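            # Predict the correction in fixed-size batches of 16384 points
            # (padded), matching the static placeholder shape used on the GPU path.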
deltae = []
for b in batches:
if args.gpu:
br = sess.run(r.predict, feed_dict={xfeed : b})
else:
br = r.predict(b)
deltae.append(br)
deltae = np.concatenate(deltae)[:xyzt.shape[0]].reshape(depth_ir.shape)
depth_corr = depth_ir + deltae
total_time += (time.time() - start_time)
total_count += 1
all_deltae[(p, t)] = deltae
all_depths[(p, t)] = depth_corr
print('Processing took {:.3f}sec total, {:.3f}sec on average'.format(total_time, total_time / total_count))
np.savez(args.output, depth_corrected=all_depths, depth_deltae=all_deltae, temps=temps, poses=poses)
| bsd-3-clause |
waterponey/scikit-learn | examples/gaussian_process/plot_gpr_co2.py | 131 | 5705 | """
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 1997. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214::
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that we have a locally very close to periodic seasonal
component. The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure shows also that the model makes very
confident predictions until around 2015.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
from sklearn.datasets import fetch_mldata
data = fetch_mldata('mauna-loa-atmospheric-co2').data
X = data[:, [1]]
y = data[:, 0]
# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0) # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
* ExpSineSquared(length_scale=1.3, periodicity=1.0) # seasonal component
# medium term irregularity
k3 = 0.66**2 \
* RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
+ WhiteKernel(noise_level=0.19**2) # noise terms
kernel_gpml = k1 + k2 + k3 + k4
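# Fit with the book's hyperparameters held fixed (optimizer=None below) so its
# log-marginal-likelihood can be compared against the optimized kernel further down.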
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
optimizer=None, normalize_y=True)
gp.fit(X, y)
print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
# Kernel with optimized parameters
k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
* ExpSineSquared(length_scale=1.0, periodicity=1.0,
periodicity_bounds="fixed") # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
+ WhiteKernel(noise_level=0.1**2,
noise_level_bounds=(1e-3, np.inf)) # noise terms
kernel = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
normalize_y=True)
gp.fit(X, y)
print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)
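# y_std from return_std=True is the pointwise predictive standard deviation,
# drawn below as a +/- 1 sigma band with fill_between.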
# Illustration
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
| bsd-3-clause |
dinossimpson/pyspeckit | pyspeckit/wrappers/fith2co.py | 2 | 6619 | """
===================
H2CO fitter wrapper
===================
Wrapper to fit formaldehyde spectra.
"""
import pyspeckit
from matplotlib import pyplot
import copy
title_dict = {'oneone':'H$_2$CO 1$_{11}$-1$_{10}$',
'twotwo':'H$_2$CO 2$_{12}$-2$_{11}$',
'threethree':'H$_2$CO 3$_{23}$-3$_{22}$'
}
def plot_h2co(spdict, spectra, fignum=1, show_components=False,
show_hyperfine_components=False, residfignum=None, annotate=None,
clear=True, residkwargs={}, plot_fit_kwargs={}, residclear=True,
resid_overlay=False, resid_yoffsets=None,
**plotkwargs):
"""
Plot the results from a multi-h2co fit
"""
spectra.plotter.figure = pyplot.figure(fignum)
spectra.plotter.axis = spectra.plotter.figure.gca()
if clear:
spectra.plotter.figure.clf()
splist = spdict.values()
for sp in splist:
sp.xarr.convert_to_unit('km/s',quiet=True)
if hasattr(spectra.specfit,'fitter'):
sp.specfit.fitter = copy.copy(spectra.specfit.fitter)
sp.specfit.modelpars = spectra.specfit.modelpars
sp.specfit.npeaks = spectra.specfit.npeaks
sp.specfit.fitter.npeaks = spectra.specfit.npeaks
if spectra.specfit.modelpars is not None:
mf = sp.specfit.fitter.n_modelfunc
kw = spectra.specfit.fitter.modelfunc_kwargs
sp.specfit.model = mf(pars=spectra.specfit.modelpars,
**kw)(sp.xarr)
if len(splist) == 2:
axdict = {'oneone':pyplot.subplot(211),
'twotwo':pyplot.subplot(212)}
elif len(splist) == 3:
axdict = {'oneone':pyplot.subplot(211),
'twotwo':pyplot.subplot(223),
'threethree':pyplot.subplot(224)}
elif len(splist) == 4:
axdict = {'oneone':pyplot.subplot(221),
'twotwo':pyplot.subplot(222),
'threethree':pyplot.subplot(223),
'fourfour':pyplot.subplot(224)}
for linename,sp in spdict.iteritems():
sp.plotter.axis=axdict[linename] # permanent
sp.plotter(axis=axdict[linename],
title=title_dict[linename],
clear=clear,
**plotkwargs)
sp.specfit.Spectrum.plotter = sp.plotter
#sp.specfit.selectregion(reset=True)
if sp.specfit.modelpars is not None:
sp.specfit.plot_fit(annotate=False,
show_components=show_components,
show_hyperfine_components=show_hyperfine_components,
**plot_fit_kwargs)
sp.plotter.reset_limits()
if spdict['oneone'].specfit.modelpars is not None and annotate:
spdict['oneone'].specfit.annotate(labelspacing=0.05,
prop={'size':'small',
'stretch':'extra-condensed'},
frameon=False)
residaxdict = None
if residfignum is not None:
pyplot.figure(residfignum)
if residclear:
pyplot.clf()
if len(splist) == 2:
residaxdict = {'oneone':pyplot.subplot(211),
'twotwo':pyplot.subplot(212)}
elif len(splist) == 3:
residaxdict = {'oneone':pyplot.subplot(211),
'twotwo':pyplot.subplot(223),
                           'threethree':pyplot.subplot(224)}
elif len(splist) == 4:
residaxdict = {'oneone':pyplot.subplot(221),
'twotwo':pyplot.subplot(222),
'threethree':pyplot.subplot(223),
'fourfour':pyplot.subplot(224)}
elif resid_overlay:
residaxdict = axdict
residclear = False # override defaults...
residfignum = fignum
if residaxdict is not None:
for linename,sp in spdict.iteritems():
sp.specfit.Spectrum.plotter = sp.plotter
try:
yoffset = resid_yoffsets[linename]
except TypeError:
yoffset = 0.0
sp.specfit.plotresiduals(axis=residaxdict[linename],
figure=residfignum,
clear=residclear,
set_limits=False,
label=False,
yoffset=yoffset,
**residkwargs)
spectra.residaxdict = residaxdict
spectra.axisdict = axdict
spectra.plotter.axis = axdict['oneone']
spectra.specfit.fitleg = spdict['oneone'].specfit.fitleg
def BigSpectrum_to_H2COdict(sp, vrange=None):
"""
A rather complicated way to make the spdicts above given a spectrum...
"""
spdict = {}
for linename,freq in pyspeckit.spectrum.models.formaldehyde.central_freq_dict.iteritems():
if vrange is not None:
freq_test_low = freq - freq * vrange[0]/pyspeckit.units.speedoflight_kms
freq_test_high = freq - freq * vrange[1]/pyspeckit.units.speedoflight_kms
else:
freq_test_low = freq_test_high = freq
if (sp.xarr.as_unit('Hz').in_range(freq_test_low) or
sp.xarr.as_unit('Hz').in_range(freq_test_high)):
spdict[linename] = sp.copy()
spdict[linename].xarr.convert_to_unit('GHz')
spdict[linename].xarr.refX = freq
spdict[linename].xarr.refX_unit = 'Hz'
#spdict[linename].baseline = copy.copy(sp.baseline)
#spdict[linename].baseline.Spectrum = spdict[linename]
spdict[linename].specfit = sp.specfit.copy(parent=spdict[linename])
spdict[linename].xarr.convert_to_unit('km/s')
if vrange is not None:
try:
spdict[linename].crop(*vrange, units='km/s')
except IndexError:
# if the freq in range, but there's no data in range, remove
spdict.pop(linename)
return spdict
def plotter_override(sp, vrange=None, **kwargs):
"""
Do plot_h2co with syntax similar to plotter()
"""
spdict = BigSpectrum_to_H2COdict(sp, vrange=vrange)
if len(spdict) not in (2,3,4):
raise ValueError("Not enough lines; don't need to use the H2CO plot wrapper")
plot_h2co(spdict, sp, **kwargs)
return spdict
| mit |
huongttlan/statsmodels | statsmodels/graphics/tests/test_dotplot.py | 26 | 15330 | import numpy as np
from statsmodels.graphics.dotplots import dot_plot
import pandas as pd
from numpy.testing import dec
# If true, the output is written to a multi-page pdf file.
pdf_output = False
try:
import matplotlib.pyplot as plt
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
def close_or_save(pdf, fig):
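    """Append the figure to the multi-page PDF if pdf_output is set, otherwise close it."""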
if pdf_output:
pdf.savefig(fig)
else:
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_all():
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages("test_dotplot.pdf")
else:
pdf = None
# Basic dotplot with points only
plt.clf()
points = range(20)
ax = plt.axes()
fig = dot_plot(points, ax=ax)
ax.set_title("Basic horizontal dotplot")
close_or_save(pdf, fig)
# Basic vertical dotplot
plt.clf()
points = range(20)
ax = plt.axes()
fig = dot_plot(points, ax=ax, horizontal=False)
ax.set_title("Basic vertical dotplot")
close_or_save(pdf, fig)
# Tall and skinny
plt.figure(figsize=(4,12))
ax = plt.axes()
    points = np.arange(40)
fig = dot_plot(points, ax=ax)
ax.set_title("Tall and skinny dotplot")
ax.set_xlabel("x axis label")
close_or_save(pdf, fig)
# Short and wide
plt.figure(figsize=(12,4))
ax = plt.axes()
    points = np.arange(40)
fig = dot_plot(points, ax=ax, horizontal=False)
ax.set_title("Short and wide dotplot")
ax.set_ylabel("y axis label")
close_or_save(pdf, fig)
# Tall and skinny striped dotplot
plt.figure(figsize=(4,12))
ax = plt.axes()
points = np.arange(40)
fig = dot_plot(points, ax=ax, striped=True)
ax.set_title("Tall and skinny striped dotplot")
ax.set_xlim(-10, 50)
close_or_save(pdf, fig)
# Short and wide striped
plt.figure(figsize=(12,4))
ax = plt.axes()
points = np.arange(40)
fig = dot_plot(points, ax=ax, striped=True, horizontal=False)
ax.set_title("Short and wide striped dotplot")
ax.set_ylim(-10, 50)
close_or_save(pdf, fig)
# Basic dotplot with few points
plt.figure()
ax = plt.axes()
points = np.arange(4)
fig = dot_plot(points, ax=ax)
ax.set_title("Basic horizontal dotplot with few lines")
close_or_save(pdf, fig)
# Basic dotplot with few points
plt.figure()
ax = plt.axes()
points = np.arange(4)
fig = dot_plot(points, ax=ax, horizontal=False)
ax.set_title("Basic vertical dotplot with few lines")
close_or_save(pdf, fig)
# Manually set the x axis limits
plt.figure()
ax = plt.axes()
points = np.arange(20)
fig = dot_plot(points, ax=ax)
ax.set_xlim(-10, 30)
ax.set_title("Dotplot with adjusted horizontal range")
close_or_save(pdf, fig)
# Left row labels
plt.clf()
ax = plt.axes()
lines = ["ABCDEFGH"[np.random.randint(0, 8)] for k in range(20)]
points = np.random.normal(size=20)
fig = dot_plot(points, lines=lines, ax=ax)
ax.set_title("Dotplot with user-supplied labels in the left margin")
close_or_save(pdf, fig)
# Left and right row labels
plt.clf()
ax = plt.axes()
points = np.random.normal(size=20)
lines = ["ABCDEFGH"[np.random.randint(0, 8)] + "::" + str(k+1)
for k in range(20)]
fig = dot_plot(points, lines=lines, ax=ax, split_names="::")
ax.set_title("Dotplot with user-supplied labels in both margins")
close_or_save(pdf, fig)
# Both sides row labels
plt.clf()
ax = plt.axes([0.1, 0.1, 0.88, 0.8])
points = np.random.normal(size=20)
lines = ["ABCDEFGH"[np.random.randint(0, 8)] + "::" + str(k+1)
for k in range(20)]
fig = dot_plot(points, lines=lines, ax=ax, split_names="::",
horizontal=False)
txt = ax.set_title("Vertical dotplot with user-supplied labels in both margins")
txt.set_position((0.5, 1.06))
close_or_save(pdf, fig)
# Custom colors and symbols
plt.clf()
ax = plt.axes([0.1, 0.07, 0.78, 0.85])
points = np.random.normal(size=20)
lines = np.kron(range(5), np.ones(4)).astype(np.int32)
styles = np.kron(np.ones(5), range(4)).astype(np.int32)
#marker_props = {k: {"color": "rgbc"[k], "marker": "osvp"[k],
# "ms": 7, "alpha": 0.6} for k in range(4)}
# python 2.6 compat, can be removed later
marker_props = dict((k, {"color": "rgbc"[k], "marker": "osvp"[k],
"ms": 7, "alpha": 0.6}) for k in range(4))
fig = dot_plot(points, lines=lines, styles=styles, ax=ax,
marker_props=marker_props)
ax.set_title("Dotplot with custom colors and symbols")
close_or_save(pdf, fig)
# Basic dotplot with symmetric intervals
plt.clf()
ax = plt.axes()
points = range(20)
fig = dot_plot(points, intervals=np.ones(20), ax=ax)
ax.set_title("Dotplot with symmetric intervals")
close_or_save(pdf, fig)
# Basic dotplot with symmetric intervals, pandas inputs.
plt.clf()
ax = plt.axes()
points = pd.Series(range(20))
intervals = pd.Series(np.ones(20))
fig = dot_plot(points, intervals=intervals, ax=ax)
ax.set_title("Dotplot with symmetric intervals (Pandas inputs)")
close_or_save(pdf, fig)
# Basic dotplot with nonsymmetric intervals
plt.clf()
ax = plt.axes()
points = np.arange(20)
intervals = [(1, 3) for i in range(20)]
fig = dot_plot(points, intervals=intervals, ax=ax)
ax.set_title("Dotplot with nonsymmetric intervals")
close_or_save(pdf, fig)
# Vertical dotplot with nonsymmetric intervals
plt.clf()
ax = plt.axes()
points = np.arange(20)
intervals = [(1, 3) for i in range(20)]
fig = dot_plot(points, intervals=intervals, ax=ax, horizontal=False)
ax.set_title("Vertical dotplot with nonsymmetric intervals")
close_or_save(pdf, fig)
# Dotplot with nonsymmetric intervals, adjust line properties
plt.clf()
ax = plt.axes()
points = np.arange(20)
intervals = [(1, 3) for x in range(20)]
line_props = {0: {"color": "lightgrey",
"solid_capstyle": "round"}}
fig = dot_plot(points, intervals=intervals, line_props=line_props, ax=ax)
ax.set_title("Dotplot with custom line properties")
close_or_save(pdf, fig)
# Dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with two points per line")
close_or_save(pdf, fig)
# Dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
fig = dot_plot(points, intervals=intervals, lines=lines,
styles=styles, ax=ax, stacked=True,
styles_order=["Dog", "Cat"])
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with two points per line (reverse order)")
close_or_save(pdf, fig)
# Vertical dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True, horizontal=False)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Vertical dotplot with two points per line")
close_or_save(pdf, fig)
# Vertical dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
styles_order = ["Dog", "Cat"]
fig = dot_plot(points, intervals=intervals, lines=lines,
styles=styles, ax=ax, stacked=True,
horizontal=False, styles_order=styles_order)
handles, labels = ax.get_legend_handles_labels()
lh = dict(zip(labels, handles))
handles = [lh[l] for l in styles_order]
leg = plt.figlegend(handles, styles_order, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Vertical dotplot with two points per line (reverse order)")
close_or_save(pdf, fig)
# Vertical dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True, striped=True, horizontal=False)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
plt.ylim(-20, 20)
ax.set_title("Vertical dotplot with two points per line")
close_or_save(pdf, fig)
# Dotplot with color-matched points and intervals
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
marker_props = {"Cat": {"color": "orange"},
"Dog": {"color": "purple"}}
line_props = {"Cat": {"color": "orange"},
"Dog": {"color": "purple"}}
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True, marker_props=marker_props,
line_props=line_props)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with color-matched points and intervals")
close_or_save(pdf, fig)
# Dotplot with color-matched points and intervals
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
marker_props = {"Cat": {"color": "orange"},
"Dog": {"color": "purple"}}
line_props = {"Cat": {"color": "orange"},
"Dog": {"color": "purple"}}
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True, marker_props=marker_props,
line_props=line_props, horizontal=False)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with color-matched points and intervals")
close_or_save(pdf, fig)
# Dotplot with sections
plt.clf()
ax = plt.axes()
points = range(30)
lines = np.kron(range(15), (1,1)).astype(np.int32)
styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
sections = [["Axx", "Byy", "Czz"][k] for k in sections]
fig = dot_plot(points, lines=lines, styles=styles, sections=sections, ax=ax)
ax.set_title("Dotplot with sections")
close_or_save(pdf, fig)
# Vertical dotplot with sections
plt.clf()
ax = plt.axes([0.1,0.1,0.9,0.75])
points = range(30)
lines = np.kron(range(15), (1,1)).astype(np.int32)
styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
sections = [["Axx", "Byy", "Czz"][k] for k in sections]
fig = dot_plot(points, lines=lines, styles=styles,
sections=sections, ax=ax, horizontal=False)
txt = ax.set_title("Vertical dotplot with sections")
txt.set_position((0.5, 1.08))
close_or_save(pdf, fig)
# Reorder sections
plt.clf()
ax = plt.axes()
points = range(30)
lines = np.kron(range(15), (1,1)).astype(np.int32)
styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
sections = [["Axx", "Byy", "Czz"][k] for k in sections]
fig = dot_plot(points, lines=lines, styles=styles, sections=sections, ax=ax,
section_order=["Byy", "Axx", "Czz"])
ax.set_title("Dotplot with sections in specified order")
close_or_save(pdf, fig)
# Reorder the lines.
plt.figure()
ax = plt.axes()
points = np.arange(4)
lines = ["A", "B", "C", "D"]
line_order = ["B", "C", "A", "D"]
fig = dot_plot(points, lines=lines, line_order=line_order, ax=ax)
ax.set_title("Dotplot with reordered lines")
close_or_save(pdf, fig)
# Format labels
plt.clf()
points = range(20)
lines = ["%d::%d" % (i, 100+i) for i in range(20)]
fmt_left = lambda x : "lft_" + x
fmt_right = lambda x : "rgt_" + x
ax = plt.axes()
fig = dot_plot(points, lines=lines, ax=ax, split_names="::",
fmt_left_name=fmt_left, fmt_right_name=fmt_right)
ax.set_title("Horizontal dotplot with name formatting")
close_or_save(pdf, fig)
# Right names only
plt.clf()
points = range(20)
lines = ["%d::%d" % (i, 100+i) for i in range(20)]
ax = plt.axes()
fig = dot_plot(points, lines=lines, ax=ax, split_names="::",
show_names="right")
ax.set_title("Show right names only")
close_or_save(pdf, fig)
# Dotplot with different numbers of points per line
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = []
ii = 0
while len(lines) < 40:
for k in range(np.random.randint(1, 4)):
lines.append(ii)
ii += 1
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
fig = dot_plot(points, lines=lines, styles=styles,
ax=ax, stacked=True)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with different numbers of points per line")
close_or_save(pdf, fig)
if pdf_output:
pdf.close()
| bsd-3-clause |
timsnyder/bokeh | examples/plotting/file/custom_tooltip.py | 2 | 1888 | import pandas as pd
from bokeh.plotting import figure, show
from bokeh.sampledata.periodic_table import elements
elements = elements.copy()
elements = elements[elements.group != "-"]
elements.sort_values('metal', inplace=True)
colormap = {
"alkali metal" : "#a6cee3",
"alkaline earth metal" : "#1f78b4",
"halogen" : "#fdbf6f",
"metal" : "#b2df8a",
"metalloid" : "#33a02c",
"noble gas" : "#bbbb88",
"nonmetal" : "#baa2a6",
"transition metal" : "#e08e79",
}
data=dict(
atomic_number=elements["atomic number"],
sym=elements["symbol"],
name=elements["name"],
atomic_mass = pd.to_numeric(elements['atomic mass'], errors="coerce"),
density=elements['density'],
metal=[x.title() for x in elements["metal"]],
type_color=[colormap[x] for x in elements["metal"]]
)
mass_format = '{0.00}'
TOOLTIPS = """
<div style="width: 62px; height: 62px; opacity: .8; padding: 5px; background-color: @type_color;">
<h1 style="margin: 0; font-size: 12px;"> @atomic_number </h1>
<h1 style="margin: 0; font-size: 24px;"><strong> @sym </strong></h1>
<p style=" margin: 0; font-size: 8px;"><strong> @name </strong></p>
<p style="margin: 0; font-size: 8px;"> @atomic_mass{mass_format} </p>
</div>
""".format(mass_format=mass_format)
p = figure(plot_width=900, plot_height=450, tooltips=TOOLTIPS, title='Densities by Atomic Mass')
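# The str.format call on TOOLTIPS above substitutes only {mass_format}, so the
# hover field becomes '@atomic_mass{0.00}', i.e. Bokeh rounds the mass to two decimals.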
p.background_fill_color = "#fafafa"
p.circle('atomic_mass', 'density', size=12, source=data, color='type_color',
line_color="black", legend='metal', alpha=0.9)
p.legend.glyph_width = 30
p.legend.glyph_height = 30
p.xaxis.axis_label= 'Atomic Mass'
p.yaxis.axis_label= 'Density'
p.xgrid.grid_line_color = None
p.toolbar_location = None
l = p.legend[0]
p.add_layout(l, 'right')
l.border_line_color = None
show(p)
| bsd-3-clause |
abhiatgithub/shogun-toolbox | applications/easysvm/tutpaper/svm_params.py | 26 | 12935 |
#from matplotlib import rc
#rc('text', usetex=True)
fontsize = 16
contourFontsize = 12
showColorbar = False
xmin = -1
xmax = 1
ymin = -1.05
ymax = 1
import sys,os
import numpy
import shogun
from shogun.Kernel import GaussianKernel, LinearKernel, PolyKernel
from shogun.Features import RealFeatures, BinaryLabels
from shogun.Classifier import LibSVM
from numpy import arange
import matplotlib
from matplotlib import pylab
pylab.rcParams['contour.negative_linestyle'] = 'solid'
def features_from_file(fileName) :
fileHandle = open(fileName)
fileHandle.readline()
features = []
labels = []
for line in fileHandle :
tokens = line.split(',')
labels.append(float(tokens[1]))
features.append([float(token) for token in tokens[2:]])
return RealFeatures(numpy.transpose(numpy.array(features))), features, BinaryLabels(numpy.array(labels,numpy.float))
def create_kernel(kname, features, kparam=None) :
if kname == 'gauss' :
kernel = GaussianKernel(features, features, kparam)
elif kname == 'linear':
kernel = LinearKernel(features, features)
elif kname == 'poly' :
kernel = PolyKernel(features, features, kparam, True, False)
return kernel
def svm_train(kernel, labels, C1, C2=None):
"""Trains a SVM with the given kernel"""
num_threads = 1
kernel.io.disable_progress()
svm = LibSVM(C1, kernel, labels)
if C2:
svm.set_C(C1, C2)
svm.parallel.set_num_threads(num_threads)
svm.io.disable_progress()
svm.train()
return svm
def svm_test(svm, kernel, features_train, features_test) :
"""predicts on the test examples"""
kernel.init(features_train, features_test)
output = svm.apply().get_labels()
return output
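# Illustrative sketch (not part of the original script): the helpers above are
# typically chained like this, assuming a CSV in the same format as
# 'data/small_gc_toy.data' used below:
#
#   features, vectors, labels = features_from_file('data/small_gc_toy.data')
#   kernel = create_kernel('gauss', features, 1.0)
#   svm = svm_train(kernel, labels, C1=10)
#   outputs = svm_test(svm, kernel, features, features)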
def decision_boundary_plot(svm, features, vectors, labels, kernel, fileName = None, **args) :
title = None
if 'title' in args :
title = args['title']
xlabel = None
if 'xlabel' in args :
xlabel = args['xlabel']
ylabel = None
if 'ylabel' in args :
ylabel = args['ylabel']
fontsize = 'medium'
if 'fontsize' in args :
fontsize = args['fontsize']
contourFontsize = 10
if 'contourFontsize' in args :
contourFontsize = args['contourFontsize']
showColorbar = True
if 'showColorbar' in args :
showColorbar = args['showColorbar']
show = True
if fileName is not None :
show = False
if 'show' in args :
show = args['show']
# setting up the grid
delta = 0.005
x = arange(xmin, xmax, delta)
y = arange(ymin, ymax, delta)
Z = numpy.zeros((len(x), len(y)), numpy.float_)
gridX = numpy.zeros((len(x) *len(y), 2), numpy.float_)
n = 0
for i in range(len(x)) :
for j in range(len(y)) :
gridX[n][0] = x[i]
gridX[n][1] = y[j]
n += 1
if kernel.get_name() == 'Linear' and 'customwandb' in args:
kernel.init_optimization_svm(svm)
b=svm.get_bias()
w=kernel.get_w()
kernel.set_w(args['customwandb'][0])
svm.set_bias(args['customwandb'][1])
if kernel.get_name() == 'Linear' and 'drawarrow' in args:
kernel.init_optimization_svm(svm)
b=svm.get_bias()
w=kernel.get_w()
s=1.0/numpy.dot(w,w)/1.17
pylab.arrow(0,-b/w[1], w[0]*s,s*w[1], width=0.01, fc='#dddddd', ec='k')
grid_features = RealFeatures(numpy.transpose(gridX))
results = svm_test(svm, kernel, features, grid_features)
n = 0
for i in range(len(x)) :
for j in range(len(y)) :
Z[i][j] = results[n]
n += 1
cdict = {'red' :((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
'green':((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
'blue' :((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
}
my_cmap = matplotlib.colors.LinearSegmentedColormap('lightgray',cdict,256)
im = pylab.imshow(numpy.transpose(Z),
interpolation='bilinear', origin='lower',
cmap=my_cmap, extent=(xmin,xmax,ymin,ymax) )
if 'decisionboundaryonly' in args:
C1 = pylab.contour(numpy.transpose(Z),
[0],
origin='lower',
linewidths=(3),
colors = ['k'],
extent=(xmin,xmax,ymin,ymax))
else:
C1 = pylab.contour(numpy.transpose(Z),
[-1,0,1],
origin='lower',
linewidths=(1,3,1),
colors = ['k','k'],
extent=(xmin,xmax,ymin,ymax))
pylab.clabel(C1,
inline=1,
fmt='%1.1f',
fontsize=contourFontsize)
# plot the data
lab=labels.get_labels()
vec=numpy.array(vectors)
idx=numpy.where(lab==-1)[0]
pylab.scatter(vec[idx,0], vec[idx,1], s=300, c='#4444ff', marker='o', alpha=0.8, zorder=100)
idx=numpy.where(lab==+1)[0]
pylab.scatter(vec[idx,0], vec[idx,1], s=500, c='#ff4444', marker='s', alpha=0.8, zorder=100)
# plot SVs
if not 'decisionboundaryonly' in args:
training_outputs = svm_test(svm, kernel, features, features)
sv_idx=numpy.where(abs(training_outputs)<=1.01)[0]
pylab.scatter(vec[sv_idx,0], vec[sv_idx,1], s=100, c='k', marker='o', alpha=0.8, zorder=100)
if 'showmovedpoint' in args:
x=-0.779838709677
y=-0.1375
pylab.scatter([x], [y], s=300, c='#4e4e61', marker='o', alpha=1, zorder=100, edgecolor='#454548')
pylab.arrow(x,y-0.1, 0, -0.8/1.5, width=0.01, fc='#dddddd', ec='k')
#pylab.show()
if title is not None :
pylab.title(title, fontsize=fontsize)
if ylabel:
pylab.ylabel(ylabel,fontsize=fontsize)
if xlabel:
pylab.xlabel(xlabel,fontsize=fontsize)
if showColorbar :
pylab.colorbar(im)
# colormap:
pylab.hot()
if fileName is not None :
pylab.savefig(fileName)
if show :
pylab.show()
def add_percent_ticks():
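    """Blank most tick labels on the current axes, labelling the ends as percentages (0%/100%)."""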
ticks=pylab.getp(pylab.gca(),'xticks')
ticklabels=len(ticks)*['']
ticklabels[0]='0%'
ticklabels[-1]='100%'
pylab.setp(pylab.gca(), xticklabels=ticklabels)
pylab.setp(pylab.gca(), yticklabels=['0%','100%'])
ticks=pylab.getp(pylab.gca(),'yticks')
ticklabels=len(ticks)*['']
#ticklabels[0]='0%'
ticklabels[-1]='100%'
pylab.setp(pylab.gca(), yticklabels=ticklabels)
xticklabels = pylab.getp(pylab.gca(), 'xticklabels')
yticklabels = pylab.getp(pylab.gca(), 'yticklabels')
pylab.setp(xticklabels, fontsize=fontsize)
pylab.setp(yticklabels, fontsize=fontsize)
def create_figures(extension = 'pdf', directory = '../../tex/figures') :
if extension[0] != '.' :
extension = '.' + extension
dpi=90
# data and linear decision boundary
features,vectors,labels = features_from_file('data/small_gc_toy.data')
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 0.7)
pylab.figure(figsize=(8,6), dpi=dpi)
decision_boundary_plot(svm, features, vectors, labels, kernel,
fontsize=fontsize, contourFontsize=contourFontsize,
title="Linear Separation", customwandb=(numpy.array([-0.05, -1.0]), -0.3),
ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
show=False, showColorbar=showColorbar, decisionboundaryonly=True)
add_percent_ticks()
pylab.savefig(os.path.join(directory, 'data_and_linear_classifier' + extension))
pylab.close()
#####################################################################################
# data and svm decision boundary
features,vectors,labels = features_from_file('data/small_gc_toy.data')
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 100)
pylab.figure(figsize=(8,6), dpi=dpi)
decision_boundary_plot(svm, features, vectors, labels, kernel,
fontsize=fontsize, contourFontsize=contourFontsize,
title="Maximum Margin Separation", drawarrow=True,
ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
show=False, showColorbar=showColorbar)
add_percent_ticks()
pylab.savefig(os.path.join(directory, 'data_and_svm_classifier' + extension))
pylab.close()
#####################################################################################
# the effect of C on the decision surface:
features,vectors,labels = features_from_file('data/small_gc_toy_outlier.data')
pylab.figure(figsize=(16,6), dpi=dpi)
pylab.subplot(121)
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 200)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title = 'Soft-Margin with C=200', ylabel="GC Content Before 'AG'",
xlabel="GC Content After 'AG'", fontsize=fontsize,
contourFontsize=contourFontsize, show=False, showmovedpoint=True,
showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(122)
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 2)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title = 'Soft-Margin with C=2',
ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False, showColorbar=showColorbar)
add_percent_ticks()
#pylab.subplots_adjust(bottom=0.05, top=0.95)
pylab.savefig(os.path.join(directory, 'effect_of_c' + extension))
pylab.close()
####################################################################################
# playing with nonlinear data:
# the effect of kernel parameters
features,vectors,labels = features_from_file('data/small_gc_toy_outlier.data')
pylab.figure(figsize=(24,6), dpi=dpi)
pylab.subplot(131)
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title = 'Linear Kernel',
ylabel="GC Content Before 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(132)
kernel = create_kernel('poly', features, 2)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Polynomial Kernel d=2',
xlabel="GC Content After 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(133)
kernel = create_kernel('poly', features, 5)
svm = svm_train(kernel, labels, 10)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Polynomial Kernel d=5',
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
#pylab.subplots_adjust(bottom=0.05, top=0.95)
pylab.savefig(os.path.join(directory, 'params_polynomial' + extension))
pylab.close()
####################################################################################
#effects of sigma
pylab.figure(figsize=(24,6), dpi=dpi)
pylab.subplot(131)
gamma = 0.1
sigma = 20.0
kernel = create_kernel('gauss', features, sigma)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Gaussian Kernel Sigma=20',
ylabel="GC Content Before 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(132)
sigma = 1.0
kernel = create_kernel('gauss', features, sigma)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Gaussian Kernel Sigma=1',
xlabel="GC Content After 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(133)
sigma = 0.05
kernel = create_kernel('gauss', features, sigma)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Gaussian Kernel Sigma=0.05',
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
#pylab.subplots_adjust(bottom=0.05, top=0.95)
pylab.savefig(os.path.join(directory, 'params_gaussian' + extension))
pylab.close()
####################################################################################
if __name__ == '__main__' :
extension = 'pdf'
if len(sys.argv) > 1 :
extension = sys.argv[1]
pylab.ioff()
create_figures(extension)
| gpl-3.0 |
neuroidss/nupic.research | projects/l2_pooling/plot_capacity_result.py | 7 | 5539 | import os
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pandas as pd
from capacity_test import _prepareResultsDir, plotResults
from capacity_test import DEFAULT_RESULT_DIR_NAME, DEFAULT_PLOT_DIR_NAME
plt.ion()
if __name__ == "__main__":
numCorticalColumns = 1
confusionThreshold = 30
l4ColumnCountList = [256, 256, 512]
numInputBitsList = [12, 5, 10]
resultDirName=DEFAULT_RESULT_DIR_NAME
plotDirName=DEFAULT_PLOT_DIR_NAME
DEFAULT_RESULT_DIR_NAME = "results"
DEFAULT_PLOT_DIR_NAME = "plots"
DEFAULT_COLORS = ("b", "r", "c", "g", 'm')
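# Note: the two assignments above rebind the names imported from capacity_test;
# resultDirName and plotDirName were already taken from the imported defaults.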
# Plot capacity vs L4 size
expParams = []
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3})
expParams.append(
{'l4Column': 200, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3})
expParams.append(
{'l4Column': 250, 'externalInputSize': 2400, 'w': 20, 'sample': 6,
'thresh': 3})
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle(
"Varying number of objects ({} cortical column{})"
.format(numCorticalColumns, "s" if numCorticalColumns > 1 else ""
), fontsize="x-large"
)
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expname = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"])
resultFileName = _prepareResultsDir("{}.csv".format(expname),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti], confusionThreshold, 0)
ploti += 1
legendEntries.append("L4 mcs {} w {} s {} thresh {}".format(
expParam["l4Column"], expParam['w'], expParam['sample'],
expParam['thresh']))
ax[0, 0].legend(legendEntries, loc=4, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_varying_object_num_l4size_summary.pdf"
)
)
# Plot capacity vs L2 size
expParams = []
expParams.append(
{'L2cellCount': 2048, 'L2activeBits': 40, 'w': 10, 'sample': 6, 'thresh': 3,
'l2Column': 1})
expParams.append(
{'L2cellCount': 4096, 'L2activeBits': 40, 'w': 10, 'sample': 6, 'thresh': 3,
'l2Column': 1})
expParams.append(
{'L2cellCount': 6144, 'L2activeBits': 40, 'w': 10, 'sample': 6, 'thresh': 3,
'l2Column': 1})
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle("Varying number of objects", fontsize="x-large")
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l2Cells_{}_l2column_{}".format(
expParam['sample'], expParam['thresh'], expParam["L2cellCount"],
expParam['l2Column'])
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti], confusionThreshold, 0)
ploti += 1
legendEntries.append("L2 cells {}/{} #cc {} ".format(
expParam['L2activeBits'], expParam['L2cellCount'], expParam['l2Column']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
fig.tight_layout()
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_vs_L2size.pdf"
)
)
# Plot capacity vs number of cortical columns
expParams = []
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 10, 'sample': 6,
'thresh': 3, 'l2Column': 1})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 10, 'sample': 6,
'thresh': 3, 'l2Column': 2})
expParams.append(
{'l4Column': 150, 'externalInputSize': 2400, 'w': 10, 'sample': 6,
'thresh': 3, 'l2Column': 3})
# plot result
ploti = 0
fig, ax = plt.subplots(2, 2)
st = fig.suptitle("Varying number of columns", fontsize="x-large")
for axi in (0, 1):
for axj in (0, 1):
ax[axi][axj].xaxis.set_major_locator(ticker.MultipleLocator(100))
legendEntries = []
for expParam in expParams:
expName = "multiple_column_capacity_varying_object_num_synapses_{}_thresh_{}_l4column_{}_l2column_{}".format(
expParam['sample'], expParam['thresh'], expParam["l4Column"],
expParam['l2Column'])
resultFileName = _prepareResultsDir("{}.csv".format(expName),
resultDirName=resultDirName
)
result = pd.read_csv(resultFileName)
plotResults(result, ax, "numObjects", None, DEFAULT_COLORS[ploti], confusionThreshold, 0)
ploti += 1
legendEntries.append("L4 mcs {} #cc {} ".format(
expParam['l4Column'], expParam['l2Column']))
ax[0, 0].legend(legendEntries, loc=3, fontsize=8)
# shift subplots down:
st.set_y(0.95)
fig.subplots_adjust(top=0.85)
plt.savefig(
os.path.join(
plotDirName,
"capacity_vs_num_columns.pdf"
)
) | agpl-3.0 |
droundy/deft | papers/water-saft/figs/single-rod-HB-density.py | 1 | 1339 | #!/usr/bin/env python
#need this to run without xserver
import matplotlib
#matplotlib.use('Agg')
import math
import matplotlib.pyplot as pyplot
import numpy
import pylab
newcalc = ""
hughescalc = "hughes-"
nm = 18.8972613
gpermL=4.9388942e-3/0.996782051315 # conversion from atomic units to mass density
colors = ["#44dd55", "#3377aa", "#002288"]
radii = [ 0.2, 0.6, 1.0 ]
for i in range(len(radii)):
newdata = pylab.loadtxt('figs/single-rod-slice-%04.2f.dat' % (2*radii[i]))
hugdata = pylab.loadtxt('figs/hughes-single-rod-slice-%04.2f.dat' % (2*radii[i]))
rnew = newdata[:, 0]/nm
rhug = hugdata[:, 0]/nm
newdensity = newdata[:, 1]/gpermL
newHB = 4*(1-newdata[:, 2])
#hugHB = 4*(1-hugdata[:,2])
bulkHB = 4*(1-newdata[len(newdata) - 2, 2])
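    # Hydrogen bonds broken relative to bulk, weighted by the local density.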
    brokenHB = (bulkHB - newHB)*newdensity
    brokenHB[newdensity<0.01] = 0
newdenstotal = 0
#for j in range(len(rnew)):
# newdenstotal += math.pi*(rnew[j+1]**2 - rnew[j]**2)*newdensity
pylab.plot(rnew, brokenHB, color = colors[i], linestyle='-')
#pylab.plot(rnew, newdensity, color = colors[i], linestyle='--')
#pylab.plot(rhug, hugHB*hugdensity, color = colors[i], linestyle='--')
#plot properties
pyplot.ylabel('')
pyplot.xlabel('Radius (nm)')
pyplot.xlim(0, 1.3)
pyplot.savefig('figs/single-rod-HB-density.pdf')
pyplot.show()
| gpl-2.0 |
wangming28/syzygy | third_party/numpy/files/numpy/lib/npyio.py | 16 | 61927 | __all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import sys
import itertools
import warnings
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
from _compiled_base import packbits, unpackbits
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
_string_like = _is_string_like
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError, "Illegal argument"
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if its already a gzip.GzipFile
try:
name = f.name
except AttributeError:
# Backward compatibility for <= 2.5
name = f.filename
mode = f.mode
f = GzipFile(fileobj=f.fileobj, filename=name)
f.mode = mode
return f
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
self._obj = obj
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError, key
def zipfile_factory(*args, **kwargs):
import zipfile
if sys.version_info >= (2, 5):
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
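# allowZip64 (Python >= 2.5) permits archives that grow beyond the 2 GB ZIP limit.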
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ".npy" extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ".npy" extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ".npy" extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.read(key)
if bytes.startswith(format.MAGIC_PREFIX):
value = BytesIO(bytes)
return format.read_array(value)
else:
return bytes
else:
raise KeyError, "%s is not a file in the archive" % key
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load a pickled, ``.npy``, or ``.npz`` binary file.
Parameters
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap`). The mode has no effect for pickled or
zipped files.
A memory-mapped array is stored on disk, and not directly loaded
into memory. However, it can be accessed and sliced like any
ndarray. Memory mapping is especially useful for accessing
small fragments of large files without reading the entire file
into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever is stored in the
pickle is returned.
- If the file is a ``.npy`` file, then an array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
own_fid = True
else:
fid = file
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz)
own_fid = False
return NpzFile(fid, own_fid=True)
elif magic == format.MAGIC_PREFIX: # .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else: # Try a pickle
try:
return _cload(fid)
except:
raise IOError, \
"Failed to interpret file %s as a pickle" % repr(file)
finally:
if own_fid:
fid.close()
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
are given, the corresponding variable names, in the ``.npz`` file will
match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
*args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
**kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with *args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with **kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
    file : str or file
        Either the file name (string) or an open file (file-like object)
        where the data will be saved. If file is a string, the ``.npz``
        extension will be appended to the file name if it is not already there.
    *args : Arguments, optional
        Arrays to save to the file. Since the names of the arrays outside
        `savez_compressed` cannot be known, they are saved with names
        "arr_0", "arr_1", and so on.
    **kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with the
        keyword names.
See Also
--------
numpy.savez : Save several arrays into an uncompressed .npz file format
"""
_savez(file, args, kwds, True)
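# Illustrative usage sketch (hypothetical file name; comments only, not part of
# the original module):
#   >>> np.savez_compressed('archive.npz', a=np.arange(3), b=np.eye(2))
#   >>> npz = np.load('archive.npz')
#   >>> sorted(npz.files)
#   ['a', 'b']
# The only difference from `savez` is that archive members are written with
# zipfile.ZIP_DEFLATED rather than zipfile.ZIP_STORED (see `_savez` below).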
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError, "Cannot use un-named variables and keyword %s" % key
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zip = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.iteritems():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
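# Illustrative sketch of the converters produced above (hypothetical calls, not
# part of the original module):
#   _getconv(np.dtype('i4'))   # -> lambda doing int(float(x)), so '3.0' parses to 3
#   _getconv(np.dtype(float))  # -> the built-in float
#   _getconv(np.dtype(bool))   # -> lambda doing bool(int(x)), so '0' parses to False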
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
record data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a record
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
fh = iter(seek_gzip_factory(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
else:
fh = iter(open(fname, 'U'))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], tuple)]
if len(shape) > 1:
for dim in dt.shape[-2:0:-1]:
packing = [(dim*packing[0][0],packing*dim)]
packing = packing*shape[0]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
packing.append((len(flat_dt),flat_packing))
return (types, packing)
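    # Illustrative sketch (hypothetical dtype, not part of the original module):
    #   flatten_dtype(np.dtype([('gender', 'S1'), ('age', 'i4'), ('weight', 'f4')]))
    #   -> ([dtype('S1'), dtype('int32'), dtype('float32')],
    #       [(1, None), (1, None), (1, None)])
    # i.e. one base dtype per flat column plus the re-packing info used below.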
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
        if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
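    # Illustrative sketch (hypothetical inputs): with the packing produced above,
    #   pack_items(['M', '21', '72'], [(1, None), (1, None), (1, None)])
    # returns ('M', '21', '72'); for a plain N-column file, packing is [(N, tuple)],
    # so each row is wrapped in an extra singleton tuple that is squeezed out later.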
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
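    # Illustrative sketch (hypothetical line, default comments='#' and whitespace
    # delimiter): split_line("1 2 # trailing comment\n") -> [b'1', b'2'].
    # Values come back as byte strings; the per-column converters parse them below.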
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in xrange(skiprows):
fh.next()
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = fh.next()
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in xrange(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).iteritems():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
    if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n'):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str
Character separating columns.
newline : str
.. versionadded:: 1.5.0
Character separating lines.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
        ``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif type(fmt) is str:
if fmt.count('%') == 1:
fmt = [fmt, ]*ncol
format = delimiter.join(fmt)
elif fmt.count('%') != ncol:
raise AttributeError('fmt has wrong number of %% formats. %s'
% fmt)
else:
format = fmt
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
finally:
if own_fh:
fh.close()
import re
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
    skip_header : int, optional
        The number of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
        If `names` is a sequence or a single string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables names.
By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
      there must not be any header in the file (else a ValueError
      exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
Examples
    --------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
errmsg = "The input argument 'converter' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError("fname mustbe a string, filehandle, or generator. "\
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn(\
"The use of `skiprows` is deprecated, it will be removed in numpy 2.0.\n" \
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
fhd.next()
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = fhd.next()
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
except StopIteration:
# might want to return empty array instead of raising error.
raise IOError('End-of-file reached before encountering data.')
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn(\
"The use of `missing` is deprecated, it will be removed in Numpy 2.0.\n" \
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
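        # For instance (illustrative), [StringConverter(None)] * 3 would alias a
        # single converter object across all three columns, so upgrading it for
        # column 0 would silently change the type inferred for the other columns.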
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = map(itemgetter(i), rows)
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = itertools.imap(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
else:
rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = zip(names, column_types)
mdtype = zip(names, [np.bool] * len(column_types))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
            # Nested dtype, e.g. [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
    kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
| apache-2.0 |
WheatonCS/Lexos | lexos/views/multicloud.py | 1 | 1944 | import json
import pandas as pd
from flask import Blueprint, request
from lexos.managers import utility
from lexos.views.base import render
multicloud_blueprint = Blueprint("multicloud", __name__)
@multicloud_blueprint.route("/multicloud", methods=["GET"])
def multicloud() -> str:
""" Gets the multicloud page.
:return: The multicloud page.
"""
return render("multicloud.html")
@multicloud_blueprint.route("/multicloud/get-word-counts", methods=["POST"])
def get_word_counts() -> str:
""" Gets the top 100 word counts for each active file.
:return: The top 100 word counts for each active file.
"""
file_manager = utility.load_file_manager()
# Get the contents of the active documents
response = []
for file in file_manager.files.values():
if file.active:
response.append({"name": file.label,
"words": get_word_counts_single_file(
file.load_contents())})
return json.dumps(response)
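# Illustrative sketch of the serialized payload (document names are hypothetical):
#   [{"name": "doc1", "words": [["the", "42", 1.0], ...]},
#    {"name": "doc2", "words": [...]}]
# i.e. one object per active file, pairing its label with its top word counts.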
def get_word_counts_single_file(contents) -> list:
""" Gets the top 100 word counts for the given contents.
:param contents: The words to count.
:return: The top 100 word counts.
"""
# Get a sorted dataframe of word counts
dtm, words = utility.simple_vectorizer([contents], "word", 1)
dataframe = pd.DataFrame({"word": words, "count": dtm[0]})
dataframe = dataframe.sort_values(by="count", ascending=False)
# Create a list of the top 100 words and their normalized counts
top_words = []
maximum_top_words = int(request.get_json()["maximum_top_words"])
maximum = dataframe.iloc[0]["count"]
dataframe = dataframe[:maximum_top_words]
for i in range(len(dataframe)):
top_words.append([dataframe.iloc[i]["word"],
str(dataframe.iloc[i]["count"]),
dataframe.iloc[i]["count"]/maximum])
return top_words
| mit |
hlin117/statsmodels | statsmodels/tsa/statespace/tests/test_sarimax.py | 9 | 63059 | """
Tests for SARIMAX models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import warnings
from statsmodels.tsa.statespace import sarimax, tools
from statsmodels.tsa import arima_model as arima
from .results import results_sarimax
from statsmodels.tools import add_constant
from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose
from nose.exc import SkipTest
current_path = os.path.dirname(os.path.abspath(__file__))
coverage_path = 'results' + os.sep + 'results_sarimax_coverage.csv'
coverage_results = pd.read_csv(current_path + os.sep + coverage_path)
class TestSARIMAXStatsmodels(object):
"""
Test ARIMA model using SARIMAX class against statsmodels ARIMA class
"""
def __init__(self):
self.true = results_sarimax.wpi1_stationary
endog = self.true['data']
self.model_a = arima.ARIMA(endog, order=(1, 1, 1))
self.result_a = self.model_a.fit(disp=-1)
self.model_b = sarimax.SARIMAX(endog, order=(1, 1, 1), trend='c',
simple_differencing=True,
hamilton_representation=True)
self.result_b = self.model_b.fit(disp=-1, cov_type='oim')
def test_loglike(self):
assert_allclose(self.result_b.llf, self.result_a.llf)
def test_aic(self):
assert_allclose(self.result_b.aic, self.result_a.aic)
def test_bic(self):
assert_allclose(self.result_b.bic, self.result_a.bic)
def test_hqic(self):
assert_allclose(self.result_b.hqic, self.result_a.hqic)
def test_mle(self):
# ARIMA estimates the mean of the process, whereas SARIMAX estimates
# the intercept. Convert the mean to intercept to compare
params_a = self.result_a.params
params_a[0] = (1 - params_a[1]) * params_a[0]
assert_allclose(self.result_b.params[:-1], params_a, atol=5e-5)
def test_bse(self):
# Make sure the default type is OIM for this example
assert(self.result_b.cov_type == 'oim')
# Test the OIM BSE values
assert_allclose(
self.result_b.bse[1:-1],
self.result_a.bse[1:],
atol=1e-2
)
def test_t_test(self):
import statsmodels.tools._testing as smt
#self.result_b.pvalues
#self.result_b._cache['pvalues'] += 1 # use to trigger failure
smt.check_ttest_tvalues(self.result_b)
smt.check_ftest_pvalues(self.result_b)
class SARIMAXStataTests(object):
def test_loglike(self):
assert_almost_equal(
self.result.llf,
self.true['loglike'], 4
)
def test_aic(self):
assert_almost_equal(
self.result.aic,
self.true['aic'], 3
)
def test_bic(self):
assert_almost_equal(
self.result.bic,
self.true['bic'], 3
)
def test_hqic(self):
hqic = (
-2*self.result.llf +
2*np.log(np.log(self.result.nobs)) *
self.result.params.shape[0]
)
assert_almost_equal(
self.result.hqic,
hqic, 3
)
class ARIMA(SARIMAXStataTests):
"""
ARIMA model
Stata arima documentation, Example 1
"""
def __init__(self, true, *args, **kwargs):
self.true = true
endog = true['data']
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
self.model = sarimax.SARIMAX(endog, order=(1, 1, 1), trend='c',
*args, **kwargs)
# Stata estimates the mean of the process, whereas SARIMAX estimates
# the intercept of the process. Get the intercept.
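        # For instance (hypothetical numbers): a process mean of 2.0 with an
        # AR(1) coefficient of 0.5 implies an intercept of (1 - 0.5) * 2.0 = 1.0.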
intercept = (1 - true['params_ar'][0]) * true['params_mean'][0]
params = np.r_[intercept, true['params_ar'], true['params_ma'],
true['params_variance']]
self.result = self.model.filter(params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-3
)
class TestARIMAStationary(ARIMA):
def __init__(self):
super(TestARIMAStationary, self).__init__(
results_sarimax.wpi1_stationary
)
def test_bse(self):
# Default covariance type (OPG)
assert_allclose(
self.result.bse[1], self.true['se_ar_opg'],
atol=1e-3,
)
assert_allclose(
self.result.bse[2], self.true['se_ma_opg'],
atol=1e-3,
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[1], self.true['se_ar_oim'],
atol=1e-3,
)
assert_allclose(
oim_bse[2], self.true['se_ma_oim'],
atol=1e-2,
)
def test_bse_cs(self):
# CS covariance type
cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[1], self.true['se_ar_oim'],
atol=1e-2,
)
assert_allclose(
cs_bse[2], self.true['se_ma_oim'],
atol=1e-2,
)
def test_bse_robust(self):
robust_oim_bse = self.result.cov_params_robust_oim.diagonal()**0.5
robust_cs_bse = self.result.cov_params_robust_cs.diagonal()**0.5
true_robust_bse = np.r_[
self.true['se_ar_robust'], self.true['se_ma_robust']
]
assert_allclose(
robust_oim_bse[1:3], true_robust_bse,
atol=1e-2,
)
assert_allclose(
robust_cs_bse[1:3], true_robust_bse,
atol=1e-1,
)
class TestARIMADiffuse(ARIMA):
def __init__(self, **kwargs):
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = (
results_sarimax.wpi1_diffuse['initial_variance']
)
super(TestARIMADiffuse, self).__init__(results_sarimax.wpi1_diffuse,
**kwargs)
def test_bse(self):
# Make sure the default type is OPG
assert_equal(self.result.cov_type, 'opg')
# Test the OPG BSE values
assert_allclose(
self.result.bse[1], self.true['se_ar_opg'],
atol=1e-1,
)
assert_allclose(
self.result.bse[2], self.true['se_ma_opg'],
atol=1e-1, rtol=1e-1
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[1], self.true['se_ar_oim'],
atol=1e-2,
)
assert_allclose(
oim_bse[2], self.true['se_ma_oim'],
atol=1e-2, rtol=1e-1
)
def test_bse_cs(self):
# CS covariance type
cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[1], self.true['se_ar_oim'],
atol=1e-2,
)
assert_allclose(
cs_bse[2], self.true['se_ma_oim'],
atol=1e-2, rtol=1e-1
)
class AdditiveSeasonal(SARIMAXStataTests):
"""
ARIMA model with additive seasonal effects
Stata arima documentation, Example 2
"""
def __init__(self, true, *args, **kwargs):
self.true = true
endog = np.log(true['data'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
self.model = sarimax.SARIMAX(
endog, order=(1, 1, (1, 0, 0, 1)), trend='c', *args, **kwargs
)
# Stata estimates the mean of the process, whereas SARIMAX estimates
# the intercept of the process. Get the intercept.
intercept = (1 - true['params_ar'][0]) * true['params_mean'][0]
params = np.r_[intercept, true['params_ar'], true['params_ma'],
true['params_variance']]
self.result = self.model.filter(params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-3
)
class TestAdditiveSeasonal(AdditiveSeasonal):
def __init__(self):
super(TestAdditiveSeasonal, self).__init__(
results_sarimax.wpi1_seasonal
)
def test_bse(self):
# Make sure the default type is OPG
assert_equal(self.result.cov_type, 'opg')
# Test the OPG BSE values
assert_allclose(
self.result.bse[1], self.true['se_ar_opg'],
atol=1e-3,
)
assert_allclose(
self.result.bse[2:4], self.true['se_ma_opg'],
atol=1e-3,
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[1], self.true['se_ar_oim'],
atol=1e-2,
)
assert_allclose(
oim_bse[2:4], self.true['se_ma_oim'],
atol=1e-1
)
def test_bse_cs(self):
# CS covariance type
cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[1], self.true['se_ar_oim'],
atol=1e-2,
)
assert_allclose(
cs_bse[2:4], self.true['se_ma_oim'],
atol=1e-1
)
class Airline(SARIMAXStataTests):
"""
Multiplicative SARIMA model: "Airline" model
Stata arima documentation, Example 3
"""
def __init__(self, true, *args, **kwargs):
self.true = true
endog = np.log(true['data'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
self.model = sarimax.SARIMAX(
endog, order=(0, 1, 1), seasonal_order=(0, 1, 1, 12),
trend='n', *args, **kwargs
)
params = np.r_[true['params_ma'], true['params_seasonal_ma'],
true['params_variance']]
self.result = self.model.filter(params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-4
)
class TestAirlineHamilton(Airline):
def __init__(self):
super(TestAirlineHamilton, self).__init__(
results_sarimax.air2_stationary
)
def test_bse(self):
# Make sure the default type is OPG
assert_equal(self.result.cov_type, 'opg')
# Test the OPG BSE values
assert_allclose(
self.result.bse[0], self.true['se_ma_opg'],
atol=1e-4,
)
assert_allclose(
self.result.bse[1], self.true['se_seasonal_ma_opg'],
atol=1e-3,
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[0], self.true['se_ma_oim'],
atol=1e-2,
)
assert_allclose(
oim_bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-1
)
def test_bse_cs(self):
# CS covariance type
cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[0], self.true['se_ma_oim'],
atol=1e-2,
)
assert_allclose(
cs_bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-1
)
class TestAirlineHarvey(Airline):
def __init__(self):
super(TestAirlineHarvey, self).__init__(
results_sarimax.air2_stationary, hamilton_representation=False
)
def test_bse(self):
# Make sure the default type is OPG
assert_equal(self.result.cov_type, 'opg')
# Test the OPG BSE values
assert_allclose(
self.result.bse[0], self.true['se_ma_opg'],
atol=1e-3,
)
assert_allclose(
self.result.bse[1], self.true['se_seasonal_ma_opg'],
atol=1e-3,
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[0], self.true['se_ma_oim'],
atol=1e-2,
)
assert_allclose(
oim_bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-1
)
def test_bse_cs(self):
# OIM covariance type
cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[0], self.true['se_ma_oim'],
atol=1e-2,
)
assert_allclose(
cs_bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-1
)
class TestAirlineStateDifferencing(Airline):
def __init__(self):
super(TestAirlineStateDifferencing, self).__init__(
results_sarimax.air2_stationary, simple_differencing=False,
hamilton_representation=False
)
def test_bic(self):
# Due to diffuse component of the state (which technically changes the
# BIC calculation - see Durbin and Koopman section 7.4), this is the
# best we can do for BIC
assert_almost_equal(
self.result.bic,
self.true['bic'], 0
)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-3
)
def test_bse(self):
# Make sure the default type is OPG
assert_equal(self.result.cov_type, 'opg')
# Test the OPG BSE values
assert_allclose(
self.result.bse[0], self.true['se_ma_opg'],
atol=1e-3,
)
assert_allclose(
self.result.bse[1], self.true['se_seasonal_ma_opg'],
atol=1e-4,
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[0], self.true['se_ma_oim'],
atol=1e-2,
)
assert_allclose(
oim_bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-1
)
def test_bse_cs(self):
# CS covariance type
cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[0], self.true['se_ma_oim'],
atol=1e-2,
)
assert_allclose(
cs_bse[1], self.true['se_seasonal_ma_oim'],
atol=1e-1
)
class Friedman(SARIMAXStataTests):
"""
ARMAX model: Friedman quantity theory of money
Stata arima documentation, Example 4
"""
def __init__(self, true, exog=None, *args, **kwargs):
self.true = true
endog = np.r_[true['data']['consump']]
if exog is None:
exog = add_constant(true['data']['m2'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
self.model = sarimax.SARIMAX(
endog, exog=exog, order=(1, 0, 1), *args, **kwargs
)
params = np.r_[true['params_exog'], true['params_ar'],
true['params_ma'], true['params_variance']]
self.result = self.model.filter(params)
class TestFriedmanMLERegression(Friedman):
def __init__(self):
super(TestFriedmanMLERegression, self).__init__(
results_sarimax.friedman2_mle
)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-2, rtol=1e-3
)
def test_bse(self):
# Make sure the default type is OPG
assert_equal(self.result.cov_type, 'opg')
# Test the OPG BSE values
assert_allclose(
self.result.bse[0], self.true['se_exog_opg'][0],
rtol=1e-1
)
assert_allclose(
self.result.bse[1], self.true['se_exog_opg'][1],
atol=1e-2,
)
assert_allclose(
self.result.bse[2], self.true['se_ar_opg'],
atol=1e-2,
)
assert_allclose(
self.result.bse[3], self.true['se_ma_opg'],
atol=1e-2,
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[0], self.true['se_exog_oim'][0],
rtol=1e-1
)
assert_allclose(
oim_bse[1], self.true['se_exog_oim'][1],
atol=1e-2,
)
assert_allclose(
oim_bse[2], self.true['se_ar_oim'],
atol=1e-2,
)
assert_allclose(
oim_bse[3], self.true['se_ma_oim'],
atol=1e-2,
)
def test_bse_cs(self):
# CS covariance type
        cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[0], self.true['se_exog_oim'][0],
rtol=1e-1
)
assert_allclose(
cs_bse[1], self.true['se_exog_oim'][1],
atol=1e-2,
)
assert_allclose(
cs_bse[2], self.true['se_ar_oim'],
atol=1e-2,
)
assert_allclose(
cs_bse[3], self.true['se_ma_oim'],
atol=1e-2,
)
class TestFriedmanStateRegression(Friedman):
def __init__(self):
# Remove the regression coefficients from the parameters, since they
# will be estimated as part of the state vector
true = dict(results_sarimax.friedman2_mle)
exog = add_constant(true['data']['m2']) / 10.
true['mle_params_exog'] = true['params_exog'][:]
true['mle_se_exog'] = true['se_exog_opg'][:]
true['params_exog'] = []
true['se_exog'] = []
super(TestFriedmanStateRegression, self).__init__(
true, exog=exog, mle_regression=False
)
self.true_params = np.r_[true['params_exog'], true['params_ar'],
true['params_ma'], true['params_variance']]
self.result = self.model.filter(self.true_params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-1, rtol=1e-1
)
def test_regression_parameters(self):
# The regression effects are integrated into the state vector as
# the last two states (thus the index [-2:]). The filtered
# estimates of the state vector produced by the Kalman filter and
# stored in `filtered_state` for these state elements give the
# recursive least squares estimates of the regression coefficients
# at each time period. To get the estimates conditional on the
# entire dataset, use the filtered states from the last time
# period (thus the index [-1]).
assert_almost_equal(
self.result.filter_results.filtered_state[-2:, -1] / 10.,
self.true['mle_params_exog'], 1
)
# Loglikelihood (and so aic, bic) is slightly different when states are
# integrated into the state vector
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_bse(self):
# Make sure the default type is OPG
assert_equal(self.result.cov_type, 'opg')
# Test the OPG BSE values
assert_allclose(
self.result.bse[0], self.true['se_ar_opg'],
atol=1e-2
)
assert_allclose(
self.result.bse[1], self.true['se_ma_opg'],
atol=1e-2
)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(
oim_bse[0], self.true['se_ar_oim'],
atol=1e-1,
)
assert_allclose(
oim_bse[1], self.true['se_ma_oim'],
atol=1e-2, rtol=1e-2
)
def test_bse_cs(self):
# CS covariance type
cs_bse = self.result.cov_params_cs.diagonal()**0.5
assert_allclose(
cs_bse[0], self.true['se_ar_oim'],
atol=1e-1,
)
assert_allclose(
cs_bse[1], self.true['se_ma_oim'],
atol=1e-2, rtol=1e-2
)
class TestFriedmanPredict(Friedman):
"""
ARMAX model: Friedman quantity theory of money, prediction
Stata arima postestimation documentation, Example 1 - Dynamic forecasts
This follows the given Stata example, although it is not truly forecasting
because it compares using the actual data (which is available in the
example but just not used in the parameter MLE estimation) against dynamic
prediction of that data. Here `test_predict` matches the first case, and
`test_dynamic_predict` matches the second.
"""
def __init__(self):
super(TestFriedmanPredict, self).__init__(
results_sarimax.friedman2_predict
)
# loglike, aic, bic are not the point of this test (they could pass, but we
# would have to modify the data so that they were calculated to
# exclude the last 15 observations)
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_predict(self):
assert_almost_equal(
self.result.predict()[0],
self.true['predict'], 3
)
def test_dynamic_predict(self):
dynamic = len(self.true['data']['consump'])-15-1
assert_almost_equal(
self.result.predict(dynamic=dynamic)[0],
self.true['dynamic_predict'], 3
)
class TestFriedmanForecast(Friedman):
"""
ARMAX model: Friedman quantity theory of money, forecasts
Variation on:
Stata arima postestimation documentation, Example 1 - Dynamic forecasts
This is a variation of the Stata example, in which the endogenous data is
actually made to be missing so that the predict command must forecast.
    As another unit test, we also compare against the case in Stata when
    predict is used against missing data (so forecasting) with the dynamic
    option also included. Note, however, that forecasting in state space models
amounts to running the Kalman filter against missing datapoints, so it is
not clear whether "dynamic" forecasting (where instead of missing
datapoints for lags, we plug in previous forecasted endog values) is
meaningful.
"""
def __init__(self):
true = dict(results_sarimax.friedman2_predict)
true['forecast_data'] = {
'consump': true['data']['consump'][-15:],
'm2': true['data']['m2'][-15:]
}
true['data'] = {
'consump': true['data']['consump'][:-15],
'm2': true['data']['m2'][:-15]
}
super(TestFriedmanForecast, self).__init__(true)
self.result = self.model.filter(self.result.params)
# loglike, aic, bic are not the point of this test (they could pass, but we
# would have to modify the data so that they were calculated to
# exclude the last 15 observations)
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_forecast(self):
end = len(self.true['data']['consump'])+15-1
exog = add_constant(self.true['forecast_data']['m2'])
assert_almost_equal(
self.result.predict(end=end, exog=exog)[0],
self.true['forecast'], 3
)
def test_dynamic_forecast(self):
end = len(self.true['data']['consump'])+15-1
dynamic = len(self.true['data']['consump'])-1
exog = add_constant(self.true['forecast_data']['m2'])
assert_almost_equal(
self.result.predict(end=end, dynamic=dynamic, exog=exog)[0],
self.true['dynamic_forecast'], 3
)
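# Illustrative sketch (not part of the original test suite): the prediction /
# forecasting pattern exercised by the Friedman tests above. The model order,
# parameter values and data passed in are hypothetical placeholders; only the
# SARIMAX predict/forecast calls mirror the API already used in these tests.
def _sketch_dynamic_forecast(endog, exog, params, n_forecast, exog_future):
    # In-sample prediction uses the observed data; passing `dynamic=d`
    # switches to dynamic prediction from index d onward, where lagged endog
    # values are replaced by their own predictions.
    mod = sarimax.SARIMAX(endog, exog=exog, order=(1, 0, 1))
    res = mod.filter(params)
    in_sample = res.predict()
    dynamic = res.predict(dynamic=len(endog) - n_forecast - 1)
    # Out-of-sample forecasting requires future values of any exog variables.
    forecast = res.forecast(n_forecast, exog=exog_future)
    return in_sample, dynamic, forecast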
class SARIMAXCoverageTest(object):
def __init__(self, i, decimal=4, endog=None, *args, **kwargs):
# Dataset
if endog is None:
endog = results_sarimax.wpi1_data
# Loglikelihood, parameters
self.true_loglike = coverage_results.loc[i]['llf']
self.true_params = np.array([float(x) for x in coverage_results.loc[i]['parameters'].split(',')])
# Stata reports the standard deviation; make it the variance
self.true_params[-1] = self.true_params[-1]**2
# Test parameters
self.decimal = decimal
# Compare using the Hamilton representation and simple differencing
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
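# (Roughly: simple_differencing=True differences the series prior to
# estimation instead of modelling the integration in the state space, and
# hamilton_representation=True uses Hamilton's state space form rather than
# Harvey's; both choices are made here to match how Stata's `arima` command
# computes the loglikelihoods being compared.)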
self.model = sarimax.SARIMAX(endog, *args, **kwargs)
def test_loglike(self):
self.result = self.model.filter(self.true_params)
assert_allclose(
self.result.llf,
self.true_loglike,
atol=0.7 * 10**(-self.decimal)
)
def test_start_params(self):
# just a quick test that start_params isn't throwing an exception
# (other than related to invertibility)
self.model.enforce_stationarity = False
self.model.enforce_invertibility = False
self.model.start_params
self.model.enforce_stationarity = True
self.model.enforce_invertibility = True
def test_transform_untransform(self):
true_constrained = self.true_params
# Sometimes the parameters given by Stata are not stationary and / or
# invertible, so we need to skip those transformations for those
# parameter sets
self.model.update(self.true_params)
contracted_polynomial_seasonal_ar = self.model.polynomial_seasonal_ar[self.model.polynomial_seasonal_ar.nonzero()]
self.model.enforce_stationarity = (
(self.model.k_ar == 0 or tools.is_invertible(np.r_[1, -self.model.polynomial_ar[1:]])) and
(len(contracted_polynomial_seasonal_ar) <= 1 or tools.is_invertible(np.r_[1, -contracted_polynomial_seasonal_ar[1:]]))
)
contracted_polynomial_seasonal_ma = self.model.polynomial_seasonal_ma[self.model.polynomial_seasonal_ma.nonzero()]
self.model.enforce_invertibility = (
(self.model.k_ma == 0 or tools.is_invertible(np.r_[1, -self.model.polynomial_ma[1:]])) and
(len(contracted_polynomial_seasonal_ma) <= 1 or tools.is_invertible(np.r_[1, -contracted_polynomial_seasonal_ma[1:]]))
)
unconstrained = self.model.untransform_params(true_constrained)
constrained = self.model.transform_params(unconstrained)
assert_almost_equal(constrained, true_constrained, 4)
self.model.enforce_stationarity = True
self.model.enforce_invertibility = True
def test_results(self):
self.result = self.model.filter(self.true_params)
# Just make sure that no exceptions are thrown during summary
self.result.summary()
# And make sure no exceptions are thrown calculating any of the
# covariance matrix types
self.result.cov_params_default
self.result.cov_params_cs
# Some of the below models have non-invertible parameters, which causes
# problems with the reverse parameter transformation used in the
# `cov_params_delta` procedure. This is unavoidable with these types of
# parameters, and should not be considered a failure.
try:
self.result.cov_params_delta
except np.linalg.LinAlgError:
pass
except ValueError:
pass
self.result.cov_params_oim
self.result.cov_params_opg
def test_predict(self):
result = self.model.filter(self.true_params)
# Test predict does not throw exceptions, and produces the right shaped
# output
predict = result.predict()
assert_equal(predict.shape, (1, self.model.nobs))
predict = result.predict(start=10, end=20)
assert_equal(predict.shape, (1, 11))
predict = result.predict(start=10, end=20, dynamic=10)
assert_equal(predict.shape, (1, 11))
# Test forecasts
if self.model.k_exog == 0:
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10)
assert_equal(predict.shape, (1, 11))
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10)
forecast = result.forecast()
assert_equal(forecast.shape, (1, 1))
forecast = result.forecast(10)
assert_equal(forecast.shape, (1, 10))
else:
exog = np.r_[[0]*self.model.k_exog*11].reshape(11, self.model.k_exog)
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10,
exog=exog)
assert_equal(predict.shape, (1, 11))
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10,
exog=exog)
exog = np.r_[[0]*self.model.k_exog].reshape(1, self.model.k_exog)
forecast = result.forecast(exog=exog)
assert_equal(forecast.shape, (1, 1))
def test_init_keys_replicate(self):
mod1 = self.model
kwargs = self.model._get_init_kwds()
endog = mod1.data.orig_endog
exog = mod1.data.orig_exog
model2 = sarimax.SARIMAX(endog, exog, **kwargs)
res1 = self.model.filter(self.true_params)
res2 = model2.filter(self.true_params)
assert_allclose(res2.llf, res1.llf, rtol=1e-13)
class Test_ar(SARIMAXCoverageTest):
# // AR: (p,0,0) x (0,0,0,0)
# arima wpi, arima(3,0,0) noconstant vce(oim)
# save_results 1
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
super(Test_ar, self).__init__(0, *args, **kwargs)
class Test_ar_as_polynomial(SARIMAXCoverageTest):
# // AR: (p,0,0) x (0,0,0,0)
# arima wpi, arima(3,0,0) noconstant vce(oim)
# save_results 1
def __init__(self, *args, **kwargs):
kwargs['order'] = ([1,1,1],0,0)
super(Test_ar_as_polynomial, self).__init__(0, *args, **kwargs)
class Test_ar_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(3,0,0) noconstant vce(oim)
# save_results 2
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['trend'] = 'c'
super(Test_ar_trend_c, self).__init__(1, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[0] = (1 - self.true_params[1:4].sum()) * self.true_params[0]
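# (Stata reports the unconditional mean of the process, while the SARIMAX
# trend parameter is the intercept; for an AR(p) the two are related by
# intercept = mean * (1 - sum of AR coefficients), which is the rescaling
# applied here and in the analogous trend tests below.)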
class Test_ar_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(3,0,0) noconstant vce(oim)
# save_results 3
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['trend'] = 'ct'
super(Test_ar_trend_ct, self).__init__(2, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_ar_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, arima(3,0,0) noconstant vce(oim)
# save_results 4
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['trend'] = [1,0,0,1]
super(Test_ar_trend_polynomial, self).__init__(3, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_ar_diff(SARIMAXCoverageTest):
# // AR and I(d): (p,d,0) x (0,0,0,0)
# arima wpi, arima(3,2,0) noconstant vce(oim)
# save_results 5
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,0)
super(Test_ar_diff, self).__init__(4, *args, **kwargs)
class Test_ar_seasonal_diff(SARIMAXCoverageTest):
# // AR and I(D): (p,0,0) x (0,D,0,s)
# arima wpi, arima(3,0,0) sarima(0,2,0,4) noconstant vce(oim)
# save_results 6
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['seasonal_order'] = (0,2,0,4)
super(Test_ar_seasonal_diff, self).__init__(5, *args, **kwargs)
class Test_ar_diffuse(SARIMAXCoverageTest):
# // AR and diffuse initialization
# arima wpi, arima(3,0,0) noconstant vce(oim) diffuse
# save_results 7
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_ar_diffuse, self).__init__(6, *args, **kwargs)
class Test_ar_no_enforce(SARIMAXCoverageTest):
# // AR: (p,0,0) x (0,0,0,0)
# arima wpi, arima(3,0,0) noconstant vce(oim)
# save_results 1
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
kwargs['enforce_stationarity'] = False
kwargs['enforce_invertibility'] = False
kwargs['initial_variance'] = 1e9
# kwargs['loglikelihood_burn'] = 0
super(Test_ar_no_enforce, self).__init__(6, *args, **kwargs)
# Reset loglikelihood burn, which gets automatically set to the number
# of states if enforce_stationarity = False
self.model.ssm.loglikelihood_burn = 0
def test_loglike(self):
# Regression in the state vector gives a different loglikelihood, so
# just check that it's approximately the same
self.result = self.model.filter(self.true_params)
assert_allclose(
self.result.llf,
self.true_loglike,
atol=2
)
class Test_ar_exogenous(SARIMAXCoverageTest):
# // ARX
# arima wpi x, arima(3,0,0) noconstant vce(oim)
# save_results 8
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_ar_exogenous, self).__init__(7, *args, **kwargs)
class Test_ar_exogenous_in_state(SARIMAXCoverageTest):
# // ARX
# arima wpi x, arima(3,0,0) noconstant vce(oim)
# save_results 8
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,0)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['mle_regression'] = False
super(Test_ar_exogenous_in_state, self).__init__(7, *args, **kwargs)
self.true_regression_coefficient = self.true_params[0]
self.true_params = self.true_params[1:]
def test_loglike(self):
# Regression in the state vector gives a different loglikelihood, so
# just check that it's approximately the same
self.result = self.model.filter(self.true_params)
assert_allclose(
self.result.llf,
self.true_loglike,
atol=2
)
def test_regression_coefficient(self):
# Test that the regression coefficient (estimated as the last filtered
# state estimate for the regression state) is the same as the Stata
# MLE state
self.result = self.model.filter(self.true_params)
assert_allclose(
self.result.filter_results.filtered_state[3][-1],
self.true_regression_coefficient,
self.decimal
)
class Test_ma(SARIMAXCoverageTest):
# // MA: (0,0,q) x (0,0,0,0)
# arima wpi, arima(0,0,3) noconstant vce(oim)
# save_results 9
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
super(Test_ma, self).__init__(8, *args, **kwargs)
class Test_ma_as_polynomial(SARIMAXCoverageTest):
# // MA: (0,0,q) x (0,0,0,0)
# arima wpi, arima(0,0,3) noconstant vce(oim)
# save_results 9
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,[1,1,1])
super(Test_ma_as_polynomial, self).__init__(8, *args, **kwargs)
class Test_ma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(0,0,3) noconstant vce(oim)
# save_results 10
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
kwargs['trend'] = 'c'
super(Test_ma_trend_c, self).__init__(9, *args, **kwargs)
class Test_ma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(0,0,3) noconstant vce(oim)
# save_results 11
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
kwargs['trend'] = 'ct'
super(Test_ma_trend_ct, self).__init__(10, *args, **kwargs)
class Test_ma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, arima(0,0,3) noconstant vce(oim)
# save_results 12
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
kwargs['trend'] = [1,0,0,1]
super(Test_ma_trend_polynomial, self).__init__(11, *args, **kwargs)
class Test_ma_diff(SARIMAXCoverageTest):
# // MA and I(d): (0,d,q) x (0,0,0,0)
# arima wpi, arima(0,2,3) noconstant vce(oim)
# save_results 13
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,3)
super(Test_ma_diff, self).__init__(12, *args, **kwargs)
class Test_ma_seasonal_diff(SARIMAXCoverageTest):
# // MA and I(D): (p,0,0) x (0,D,0,s)
# arima wpi, arima(0,0,3) sarima(0,2,0,4) noconstant vce(oim)
# save_results 14
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
kwargs['seasonal_order'] = (0,2,0,4)
super(Test_ma_seasonal_diff, self).__init__(13, *args, **kwargs)
class Test_ma_diffuse(SARIMAXCoverageTest):
# // MA and diffuse initialization
# arima wpi, arima(0,0,3) noconstant vce(oim) diffuse
# save_results 15
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_ma_diffuse, self).__init__(14, *args, **kwargs)
class Test_ma_exogenous(SARIMAXCoverageTest):
# // MAX
# arima wpi x, arima(0,0,3) noconstant vce(oim)
# save_results 16
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,3)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_ma_exogenous, self).__init__(15, *args, **kwargs)
class Test_arma(SARIMAXCoverageTest):
# // ARMA: (p,0,q) x (0,0,0,0)
# arima wpi, arima(3,0,3) noconstant vce(oim)
# save_results 17
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,3)
super(Test_arma, self).__init__(16, *args, **kwargs)
class Test_arma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(3,0,2) noconstant vce(oim)
# save_results 18
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
kwargs['trend'] = 'c'
super(Test_arma_trend_c, self).__init__(17, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:1] = (1 - self.true_params[1:4].sum()) * self.true_params[:1]
class Test_arma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(3,0,2) noconstant vce(oim)
# save_results 19
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
kwargs['trend'] = 'ct'
super(Test_arma_trend_ct, self).__init__(18, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_arma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, arima(3,0,2) noconstant vce(oim)
# save_results 20
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
kwargs['trend'] = [1,0,0,1]
super(Test_arma_trend_polynomial, self).__init__(19, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_arma_diff(SARIMAXCoverageTest):
# // ARMA and I(d): (p,d,q) x (0,0,0,0)
# arima wpi, arima(3,2,2) noconstant vce(oim)
# save_results 21
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
super(Test_arma_diff, self).__init__(20, *args, **kwargs)
class Test_arma_seasonal_diff(SARIMAXCoverageTest):
# // ARMA and I(D): (p,0,q) x (0,D,0,s)
# arima wpi, arima(3,0,2) sarima(0,2,0,4) noconstant vce(oim)
# save_results 22
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
kwargs['seasonal_order'] = (0,2,0,4)
super(Test_arma_seasonal_diff, self).__init__(21, *args, **kwargs)
class Test_arma_diff_seasonal_diff(SARIMAXCoverageTest):
# // ARMA and I(d) and I(D): (p,d,q) x (0,D,0,s)
# arima wpi, arima(3,2,2) sarima(0,2,0,4) noconstant vce(oim)
# save_results 23
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
kwargs['seasonal_order'] = (0,2,0,4)
super(Test_arma_diff_seasonal_diff, self).__init__(22, *args, **kwargs)
class Test_arma_diffuse(SARIMAXCoverageTest):
# // ARMA and diffuse initialization
# arima wpi, arima(3,0,2) noconstant vce(oim) diffuse
# save_results 24
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_arma_diffuse, self).__init__(23, *args, **kwargs)
class Test_arma_exogenous(SARIMAXCoverageTest):
# // ARMAX
# arima wpi x, arima(3,0,2) noconstant vce(oim)
# save_results 25
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,0,2)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_arma_exogenous, self).__init__(24, *args, **kwargs)
class Test_seasonal_ar(SARIMAXCoverageTest):
# // SAR: (0,0,0) x (P,0,0,s)
# arima wpi, sarima(3,0,0,4) noconstant vce(oim)
# save_results 26
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
super(Test_seasonal_ar, self).__init__(25, *args, **kwargs)
class Test_seasonal_ar_as_polynomial(SARIMAXCoverageTest):
# // SAR: (0,0,0) x (P,0,0,s)
# arima wpi, sarima(3,0,0,4) noconstant vce(oim)
# save_results 26
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = ([1,1,1],0,0,4)
super(Test_seasonal_ar_as_polynomial, self).__init__(25, *args, **kwargs)
class Test_seasonal_ar_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(3,0,0,4) noconstant vce(oim)
# save_results 27
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
kwargs['trend'] = 'c'
super(Test_seasonal_ar_trend_c, self).__init__(26, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:1] = (1 - self.true_params[1:4].sum()) * self.true_params[:1]
class Test_seasonal_ar_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(3,0,0,4) noconstant vce(oim)
# save_results 28
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
kwargs['trend'] = 'ct'
super(Test_seasonal_ar_trend_ct, self).__init__(27, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_seasonal_ar_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, sarima(3,0,0,4) noconstant vce(oim)
# save_results 29
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
kwargs['trend'] = [1,0,0,1]
super(Test_seasonal_ar_trend_polynomial, self).__init__(28, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_seasonal_ar_diff(SARIMAXCoverageTest):
# // SAR and I(d): (0,d,0) x (P,0,0,s)
# arima wpi, arima(0,2,0) sarima(3,0,0,4) noconstant vce(oim)
# save_results 30
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,0)
kwargs['seasonal_order'] = (3,0,0,4)
super(Test_seasonal_ar_diff, self).__init__(29, *args, **kwargs)
class Test_seasonal_ar_seasonal_diff(SARIMAXCoverageTest):
# // SAR and I(D): (0,0,0) x (P,D,0,s)
# arima wpi, sarima(3,2,0,4) noconstant vce(oim)
# save_results 31
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,2,0,4)
super(Test_seasonal_ar_seasonal_diff, self).__init__(30, *args, **kwargs)
class Test_seasonal_ar_diffuse(SARIMAXCoverageTest):
# // SAR and diffuse initialization
# arima wpi, sarima(3,0,0,4) noconstant vce(oim) diffuse
# save_results 32
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_seasonal_ar_diffuse, self).__init__(31, *args, **kwargs)
class Test_seasonal_ar_exogenous(SARIMAXCoverageTest):
# // SARX
# arima wpi x, sarima(3,0,0,4) noconstant vce(oim)
# save_results 33
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,0,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_ar_exogenous, self).__init__(32, *args, **kwargs)
class Test_seasonal_ma(SARIMAXCoverageTest):
# // SMA
# arima wpi, sarima(0,0,3,4) noconstant vce(oim)
# save_results 34
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
super(Test_seasonal_ma, self).__init__(33, *args, **kwargs)
class Test_seasonal_ma_as_polynomial(SARIMAXCoverageTest):
# // SMA
# arima wpi, sarima(0,0,3,4) noconstant vce(oim)
# save_results 34
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,[1,1,1],4)
super(Test_seasonal_ma_as_polynomial, self).__init__(33, *args, **kwargs)
class Test_seasonal_ma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(0,0,3,4) noconstant vce(oim)
# save_results 35
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
kwargs['trend'] = 'c'
kwargs['decimal'] = 3
super(Test_seasonal_ma_trend_c, self).__init__(34, *args, **kwargs)
class Test_seasonal_ma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(0,0,3,4) noconstant vce(oim)
# save_results 36
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
kwargs['trend'] = 'ct'
super(Test_seasonal_ma_trend_ct, self).__init__(35, *args, **kwargs)
class Test_seasonal_ma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, sarima(0,0,3,4) noconstant vce(oim)
# save_results 37
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
kwargs['trend'] = [1,0,0,1]
kwargs['decimal'] = 3
super(Test_seasonal_ma_trend_polynomial, self).__init__(36, *args, **kwargs)
class Test_seasonal_ma_diff(SARIMAXCoverageTest):
# // SMA and I(d): (0,d,0) x (0,0,Q,s)
# arima wpi, arima(0,2,0) sarima(0,0,3,4) noconstant vce(oim)
# save_results 38
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,0)
kwargs['seasonal_order'] = (0,0,3,4)
super(Test_seasonal_ma_diff, self).__init__(37, *args, **kwargs)
class Test_seasonal_ma_seasonal_diff(SARIMAXCoverageTest):
# // SMA and I(D): (0,0,0) x (0,D,Q,s)
# arima wpi, sarima(0,2,3,4) noconstant vce(oim)
# save_results 39
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,2,3,4)
super(Test_seasonal_ma_seasonal_diff, self).__init__(38, *args, **kwargs)
class Test_seasonal_ma_diffuse(SARIMAXCoverageTest):
# // SMA and diffuse initialization
# arima wpi, sarima(0,0,3,4) noconstant vce(oim) diffuse
# save_results 40
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_seasonal_ma_diffuse, self).__init__(39, *args, **kwargs)
class Test_seasonal_ma_exogenous(SARIMAXCoverageTest):
# // SMAX
# arima wpi x, sarima(0,0,3,4) noconstant vce(oim)
# save_results 41
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (0,0,3,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_ma_exogenous, self).__init__(40, *args, **kwargs)
class Test_seasonal_arma(SARIMAXCoverageTest):
# // SARMA: (0,0,0) x (P,0,Q,s)
# arima wpi, sarima(3,0,2,4) noconstant vce(oim)
# save_results 42
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
super(Test_seasonal_arma, self).__init__(41, *args, **kwargs)
class Test_seasonal_arma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(3,0,2,4) noconstant vce(oim)
# save_results 43
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
kwargs['trend'] = 'c'
super(Test_seasonal_arma_trend_c, self).__init__(42, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:1] = (1 - self.true_params[1:4].sum()) * self.true_params[:1]
class Test_seasonal_arma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(3,0,2,4) noconstant vce(oim)
# save_results 44
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
kwargs['trend'] = 'ct'
super(Test_seasonal_arma_trend_ct, self).__init__(43, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
class Test_seasonal_arma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1,0,0,1]
# arima wpi c t3, sarima(3,0,2,4) noconstant vce(oim)
# save_results 45
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
kwargs['trend'] = [1,0,0,1]
kwargs['decimal'] = 3
super(Test_seasonal_arma_trend_polynomial, self).__init__(44, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[:2] = (1 - self.true_params[2:5].sum()) * self.true_params[:2]
def test_results(self):
self.result = self.model.filter(self.true_params)
# Just make sure that no exceptions are thrown during summary
self.result.summary()
# And make sure no exceptions are thrown calculating any of the
# covariance matrix types
self.result.cov_params_default
# Known failure due to the complex step inducing non-stationary
# parameters, causing a failure in the solve_discrete_lyapunov call
# self.result.cov_params_cs
# self.result.cov_params_delta
self.result.cov_params_oim
self.result.cov_params_opg
class Test_seasonal_arma_diff(SARIMAXCoverageTest):
# // SARMA and I(d): (0,d,0) x (P,0,Q,s)
# arima wpi, arima(0,2,0) sarima(3,0,2,4) noconstant vce(oim)
# save_results 46
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,0)
kwargs['seasonal_order'] = (3,0,2,4)
super(Test_seasonal_arma_diff, self).__init__(45, *args, **kwargs)
class Test_seasonal_arma_seasonal_diff(SARIMAXCoverageTest):
# // SARMA and I(D): (0,0,0) x (P,D,Q,s)
# arima wpi, sarima(3,2,2,4) noconstant vce(oim)
# save_results 47
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,2,2,4)
super(Test_seasonal_arma_seasonal_diff, self).__init__(46, *args, **kwargs)
class Test_seasonal_arma_diff_seasonal_diff(SARIMAXCoverageTest):
# // SARMA and I(d) and I(D): (0,d,0) x (P,D,Q,s)
# arima wpi, arima(0,2,0) sarima(3,2,2,4) noconstant vce(oim)
# save_results 48
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,2,0)
kwargs['seasonal_order'] = (3,2,2,4)
super(Test_seasonal_arma_diff_seasonal_diff, self).__init__(47, *args, **kwargs)
def test_results(self):
self.result = self.model.filter(self.true_params)
# Just make sure that no exceptions are thrown during summary
self.result.summary()
# And make sure no exceptions are thrown calculating any of the
# covariance matrix types
self.result.cov_params_default
# Known failure due to the complex step inducing non-stationary
# parameters, causing a failure in the solve_discrete_lyapunov call
# self.result.cov_params_cs
# self.result.cov_params_delta
self.result.cov_params_oim
self.result.cov_params_opg
class Test_seasonal_arma_diffuse(SARIMAXCoverageTest):
# // SARMA and diffuse initialization
# arima wpi, sarima(3,0,2,4) noconstant vce(oim) diffuse
# save_results 49
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
kwargs['decimal'] = 3
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_seasonal_arma_diffuse, self).__init__(48, *args, **kwargs)
class Test_seasonal_arma_exogenous(SARIMAXCoverageTest):
# // SARMAX
# arima wpi x, sarima(3,0,2,4) noconstant vce(oim)
# save_results 50
def __init__(self, *args, **kwargs):
kwargs['order'] = (0,0,0)
kwargs['seasonal_order'] = (3,0,2,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_arma_exogenous, self).__init__(49, *args, **kwargs)
class Test_sarimax_exogenous(SARIMAXCoverageTest):
# // SARIMAX and exogenous
# arima wpi x, arima(3,2,2) sarima(3,2,2,4) noconstant vce(oim)
# save_results 51
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
kwargs['seasonal_order'] = (3,2,2,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_sarimax_exogenous, self).__init__(50, *args, **kwargs)
class Test_sarimax_exogenous_not_hamilton(SARIMAXCoverageTest):
# // SARIMAX and exogenous
# arima wpi x, arima(3,2,2) sarima(3,2,2,4) noconstant vce(oim)
# save_results 51
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
kwargs['seasonal_order'] = (3,2,2,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['hamilton_representation'] = False
kwargs['simple_differencing'] = False
super(Test_sarimax_exogenous_not_hamilton, self).__init__(50, *args, **kwargs)
class Test_sarimax_exogenous_diffuse(SARIMAXCoverageTest):
# // SARIMAX and exogenous diffuse
# arima wpi x, arima(3,2,2) sarima(3,2,2,4) noconstant vce(oim) diffuse
# save_results 52
def __init__(self, *args, **kwargs):
kwargs['order'] = (3,2,2)
kwargs['seasonal_order'] = (3,2,2,4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['decimal'] = 2
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_sarimax_exogenous_diffuse, self).__init__(51, *args, **kwargs)
class Test_arma_exog_trend_polynomial_missing(SARIMAXCoverageTest):
# // ARMA and exogenous and trend polynomial and missing
# gen wpi2 = wpi
# replace wpi2 = . in 10/19
# arima wpi2 x c t3, arima(3,0,2) noconstant vce(oim)
# save_results 53
def __init__(self, *args, **kwargs):
endog = np.r_[results_sarimax.wpi1_data]
# Note we're using the non-missing exog data
kwargs['exog'] = ((endog - np.floor(endog))**2)[1:]
endog[9:19] = np.nan
endog = endog[1:] - endog[:-1]
endog[9] = np.nan
kwargs['order'] = (3,0,2)
kwargs['trend'] = [0,0,0,1]
kwargs['decimal'] = 1
super(Test_arma_exog_trend_polynomial_missing, self).__init__(52, endog=endog, *args, **kwargs)
# Modify true params to convert from mean to intercept form
self.true_params[0] = (1 - self.true_params[2:5].sum()) * self.true_params[0]
# Miscellaneous coverage tests
def test_simple_time_varying():
# This tests time-varying parameter regression in a case where the parameters
# are not actually time-varying and the regression fit is perfect
endog = np.arange(100)*1.0
exog = 2*endog
mod = sarimax.SARIMAX(endog, exog=exog, order=(0,0,0), time_varying_regression=True, mle_regression=False)
# Ignore the warning that MLE doesn't converge
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(disp=-1)
# Test that the estimated variances of the errors are essentially zero
assert_almost_equal(res.params, [0,0], 7)
# Test that the time-varying coefficients are all 0.5 (except the first
# one)
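# (exog is defined as 2 * endog above, so the implied regression coefficient is 0.5)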
assert_almost_equal(res.filter_results.filtered_state[0][1:], [0.5]*99, 9)
def test_invalid_time_varying():
assert_raises(ValueError, sarimax.SARIMAX, endog=[1,2,3], mle_regression=True, time_varying_regression=True)
def test_manual_stationary_initialization():
endog = results_sarimax.wpi1_data
# Create the first model to compare against
mod1 = sarimax.SARIMAX(endog, order=(3,0,0))
res1 = mod1.filter([0.5,0.2,0.1,1])
# Create a second model with "known" initialization
mod2 = sarimax.SARIMAX(endog, order=(3,0,0))
mod2.ssm.initialize_known(res1.filter_results.initial_state,
res1.filter_results.initial_state_cov)
mod2.initialize_state() # a noop in this case (include for coverage)
res2 = mod2.filter([0.5,0.2,0.1,1])
# Create a third model with "known" initialization, but specified in kwargs
mod3 = sarimax.SARIMAX(endog, order=(3,0,0),
initialization='known',
initial_state=res1.filter_results.initial_state,
initial_state_cov=res1.filter_results.initial_state_cov)
res3 = mod3.filter([0.5,0.2,0.1,1])
# Create the fourth model with stationary initialization specified in kwargs
mod4 = sarimax.SARIMAX(endog, order=(3,0,0), initialization='stationary')
res4 = mod4.filter([0.5,0.2,0.1,1])
# Just test a couple of things to make sure the results are the same
assert_almost_equal(res1.llf, res2.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res2.filter_results.filtered_state)
assert_almost_equal(res1.llf, res3.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res3.filter_results.filtered_state)
assert_almost_equal(res1.llf, res4.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res4.filter_results.filtered_state)
def test_manual_approximate_diffuse_initialization():
endog = results_sarimax.wpi1_data
# Create the first model to compare against
mod1 = sarimax.SARIMAX(endog, order=(3,0,0))
mod1.ssm.initialize_approximate_diffuse(1e9)
res1 = mod1.filter([0.5,0.2,0.1,1])
# Create a second model with "known" initialization
mod2 = sarimax.SARIMAX(endog, order=(3,0,0))
mod2.ssm.initialize_known(res1.filter_results.initial_state,
res1.filter_results.initial_state_cov)
mod2.initialize_state() # a noop in this case (include for coverage)
res2 = mod2.filter([0.5,0.2,0.1,1])
# Create a third model with "known" initialization, but specified in kwargs
mod3 = sarimax.SARIMAX(endog, order=(3,0,0),
initialization='known',
initial_state=res1.filter_results.initial_state,
initial_state_cov=res1.filter_results.initial_state_cov)
res3 = mod3.filter([0.5,0.2,0.1,1])
# Create the fourth model with approximate diffuse initialization specified
# in kwargs
mod4 = sarimax.SARIMAX(endog, order=(3,0,0),
initialization='approximate_diffuse',
initial_variance=1e9)
res4 = mod4.filter([0.5,0.2,0.1,1])
# Just test a couple of things to make sure the results are the same
assert_almost_equal(res1.llf, res2.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res2.filter_results.filtered_state)
assert_almost_equal(res1.llf, res3.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res3.filter_results.filtered_state)
assert_almost_equal(res1.llf, res4.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res4.filter_results.filtered_state)
def test_results():
endog = results_sarimax.wpi1_data
mod = sarimax.SARIMAX(endog, order=(1,0,1))
res = mod.filter([0.5,-0.5,1], cov_type='oim')
assert_almost_equal(res.arroots, 2.)
assert_almost_equal(res.maroots, 2.)
assert_almost_equal(res.arfreq, np.arctan2(0, 2) / (2*np.pi))
assert_almost_equal(res.mafreq, np.arctan2(0, 2) / (2*np.pi))
assert_almost_equal(res.arparams, [0.5])
assert_almost_equal(res.maparams, [-0.5])
| bsd-3-clause |
wk8910/bio_tools | 01.dadi_fsc/06.dadi_gene_flow_comparision/model_no_geneflow/01.model.py | 1 | 1646 | #! /usr/bin/env python
import os,sys,re
# import matplotlib
# matplotlib.use('Agg')
import numpy
import sys
from numpy import array
# import pylab
import dadi
import custom_model
spectrum_file = sys.argv[1]
data = dadi.Spectrum.from_file(spectrum_file)
data = data.fold()
ns = data.sample_sizes
pts_l = [40,50,60]
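# (pts_l lists the grid sizes used by dadi: the model is evaluated on each
# grid and extrapolated toward an infinitely fine grid by
# make_extrap_log_func below.)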
# nuPre,TPre,nu1,nu2,T
func = custom_model.custom_model
upper_bound = [10,1,10,10,1]
lower_bound = [1e-3,1e-3,1e-3,1e-3,1e-3]
# p0 = [5.11984,0.558449,0.403511,0.317042,0.403511,0.317042,0.0432683,0.0432683,0.0432683,1.44061,1.7391,1.44061,1.7391,1.44061,1.7391]
p0 = [1,0.1,1,1,0.1]
func_ex = dadi.Numerics.make_extrap_log_func(func)
p0 = dadi.Misc.perturb_params(p0, fold=1, upper_bound=upper_bound, lower_bound=lower_bound)
print('Beginning optimization ************************************************')
popt = dadi.Inference.optimize_log(p0, data, func_ex, pts_l,
lower_bound=lower_bound,
upper_bound=upper_bound,
verbose=len(p0), maxiter=30)
print('Finished optimization **************************************************')
print('Best-fit parameters: {0}'.format(popt))
model = func_ex(popt, ns, pts_l)
ll_model = dadi.Inference.ll_multinom(model, data)
print('Maximum log composite likelihood: {0}\n'.format(ll_model))
theta = dadi.Inference.optimal_sfs_scaling(model, data)
print('Optimal value of theta: {0}\n'.format(theta))
result=[ll_model,theta]+popt.tolist()
print('###DADIOUTPUT###')
# nuPre,TPre,nu1,nu2,T
print('likelihood\ttheta\tN.nuPre\tT.Tpre\tN.nu1\tN.nu2\tT.T')
print("\t".join(map(str,result)))
| mpl-2.0 |
andaag/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
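# Illustrative sketch (not part of scikit-learn): a tiny worked example of the
# contingency matrix built above, relying only on the function defined in this
# module. The two labelings are arbitrary placeholders.
def _contingency_matrix_example():
    labels_true = [0, 0, 1, 1, 2]
    labels_pred = [0, 0, 1, 2, 2]
    # Rows index true classes, columns index predicted clusters; entry (i, j)
    # counts samples with true class i assigned to cluster j:
    # [[2, 0, 0],
    #  [0, 1, 1],
    #  [0, 0, 1]]
    return contingency_matrix(labels_true, labels_pred)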
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
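# sum_comb counts agreeing pairs via the contingency cells, while sum_comb_c
# and sum_comb_k are the analogous pair counts for the true classes and the
# predicted clusters; the expression below is then
# (Index - ExpectedIndex) / (MaxIndex - ExpectedIndex) from the
# adjusted-for-chance formula in the docstring.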
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous, but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float (upper bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_information_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally in-complete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is an normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
    This metric is furthermore symmetric: switching ``labels_true`` with
    ``labels_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    nmi : float
       Score between 0.0 and 1.0, where 1.0 stands for a perfectly complete
       labeling.
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
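# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Uncalled helper showing the normalization used by
# normalized_mutual_info_score in this version of the module:
# NMI = MI / sqrt(H(labels_true) * H(labels_pred)).
# The helper name `_nmi_normalization_demo` is hypothetical.
def _nmi_normalization_demo():
    labels_a = [0, 0, 1, 1]
    labels_b = [0, 0, 1, 2]
    # entropy([0, 0, 1, 1]) is ln(2) ~= 0.6931 (two equally likely labels)
    h_a, h_b = entropy(labels_a), entropy(labels_b)
    mi = mutual_info_score(labels_a, labels_b)
    manual_nmi = mi / max(np.sqrt(h_a * h_b), 1e-10)
    assert abs(manual_nmi - normalized_mutual_info_score(labels_a, labels_b)) < 1e-10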
| bsd-3-clause |
bioinfo-core-BGU/neatseq-flow_modules | neatseq_flow_modules/Liron/Snippy_module/Snippy_parser.py | 2 | 6621 | import os, re
import argparse
import pandas as pd
parser = argparse.ArgumentParser(description='Parse MLST')
parser.add_argument('-M', type=str,
help='MetaData file')
parser.add_argument('-F', type=str,
help='Merged MLST typing file')
parser.add_argument('-O' , type=str, default=os.getcwd(),
help='Output file directory')
parser.add_argument('-C', type=float, default=0.95,
help='Percentage of identified allele cutoff to consider sample [0.0 - 1.0]')
parser.add_argument('--S_MetaData', type=str, default="Sample",
help='samples ID field in the metadata file')
parser.add_argument('--S_Merged', type=str, default="Sample",
help='samples ID field in the Merged file')
parser.add_argument('--Non_allelic', nargs='+', type=str, default=["Sample",'Status','Percentage_of_missing_genes'],
help='Non allelic fields in the Merged file')
parser.add_argument('--Fields', nargs='+', type=str, default=['Status','Percentage_of_missing_genes'],
help='Fields to move to the metadata file')
parser.add_argument('--Cut', action='store_true', default=False,
help='Use only samples with metadata information')
parser.add_argument('--FASTA', action='store_true', default=False,
help='The input is a FASTA file')
parser.add_argument('--Polymorphic_sites_only', action='store_true', default=False,
help='Filter Non Polymorphic Sites from fasta input file')
args = parser.parse_args()
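# Example invocation (editor's illustration; the file names below are
# hypothetical placeholders, not files shipped with the module):
#
#   python Snippy_parser.py -M metadata.tab -F merged_MLST.tab -O results \
#          -C 0.95 --S_MetaData Sample --S_Merged Sample
#
# This writes phyloviz_MetaData.tab and phyloviz_Alleles.tab into the -O
# directory (see the to_csv calls at the end of the script).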
Fields=[]
if args.Fields != None:
for field in args.Fields:
if len(field.split(","))>1:
for field_t in field.split(","):
Fields=Fields+[field_t]
else:
Fields=Fields+[field]
args.Fields=Fields
Fields=[]
if args.Non_allelic != None:
for field in args.Non_allelic:
if len(field.split(","))>1:
for field_t in field.split(","):
Fields=Fields+[field_t]
else:
Fields=Fields+[field]
args.Non_allelic=Fields
flag=0
if args.FASTA:
from Bio import AlignIO
msa=AlignIO.read(args.F,"fasta")
data=pd.DataFrame.from_records(msa)
data.index=[msa[int(x)].id for x in list(data.index)]
data=data.drop(data.columns[data.apply(lambda x: len(re.findall("[- N]",x.sum().upper()))>0 ,axis=0)],axis=1)
if args.Polymorphic_sites_only:
data=data.drop(data.columns[data.apply(lambda x: len(list(set(x.sum().upper())))==1 ,axis=0)],axis=1)
temp_data=data
else:
temp_data = pd.read_csv(args.F, sep='\t',index_col=False, encoding="ISO-8859-1")
temp_data=temp_data.set_index(args.S_Merged,drop=False).copy()
for j in temp_data.index:
for i in temp_data.columns:
if type(temp_data.loc[j,i])==str:
if temp_data.loc[j,i].startswith("New_Allele="):
temp_data.loc[temp_data.loc[:,i]==temp_data.loc[j,i],i]=i+"_"+str(j)
temp_data.index=[str(x) for x in temp_data.index]
if (args.M != None)&(args.Cut):
MetaData = pd.read_csv(args.M , sep='\t',index_col=False, encoding="ISO-8859-1")
MetaData=MetaData.set_index(args.S_MetaData,drop=False).copy()
MetaData.index=[str(x) for x in MetaData.index]
flag=1
temp_data=temp_data.loc[[x in MetaData.index for x in temp_data.index],].copy()
args.Non_allelic.extend([args.S_Merged])
args.Non_allelic.extend(args.Fields)
if None in args.Non_allelic:
args.Non_allelic.remove(None)
args.Non_allelic=set(args.Non_allelic)
def cut_rows(temp_data,cutoff,Non_allelic_rows):
drop=[x for x in temp_data.columns if x not in Non_allelic_rows]
temp_data=temp_data[drop]
stay=list()
for row in temp_data.index:
        # fraction of non-missing (identified) alleles for this sample
        frac_identified = float(temp_data.loc[row].count()) / float(temp_data.shape[1])
        if frac_identified >= cutoff:
            stay.append(row)
        else:
            print("The Sample %s has a lower fraction of identified alleles (%s) than the cutoff" % (row, frac_identified))
    return temp_data.loc[stay].copy()
def cut_col(temp_data,Non_allelic):
stay=list()
for col in temp_data.columns:
if col not in Non_allelic:
if temp_data[col].count()==temp_data.shape[0]:
stay.append(col)
else:
stay.append(col)
return temp_data[stay].copy()
def drop(data,fields,op=1):
if op==1:
for i in fields:
if i in data.columns:
data=data.drop(i,axis=1).copy()
else:
for i in fields:
if i in data:
data=data.drop(i).copy()
return data
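# --- Illustrative sketch (editor's addition, not called by the script) ---
# Tiny, hypothetical example of the cut_rows helper above: with a 0.5 cutoff,
# a sample missing 2 of 3 alleles is dropped while a complete sample is kept.
def _cut_rows_demo():
    toy = pd.DataFrame({'g1': [1, 2], 'g2': [1, None], 'g3': [1, None]},
                       index=['A', 'B'])
    kept = cut_rows(toy, 0.5, [])
    assert list(kept.index) == ['A']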
new_temp_data=cut_col(cut_rows(temp_data, args.C ,args.Non_allelic),args.Non_allelic)
if args.FASTA:
new_temp_data['seq']=new_temp_data.apply(lambda x: x.sum().upper() ,axis=1)
g=new_temp_data.groupby('seq')
new_temp_data=new_temp_data.drop(new_temp_data.columns[new_temp_data.columns=='seq'],axis=1)
else:
m=new_temp_data.columns
m=drop(m,args.Non_allelic,0).copy()
g=new_temp_data.groupby(list(m[:]))
new_temp_data["Index"]=''
num=1
for i in g:
new_temp_data.loc[i[1].index,"Index"]=str(num)
num=num+1
if args.M != None:
if flag!=1:
MetaData = pd.read_csv(args.M , sep='\t',index_col=False, encoding="ISO-8859-1")
MetaData=MetaData.set_index(args.S_MetaData,drop=False).copy()
MetaData.index=[str(x) for x in MetaData.index]
MetaData=MetaData.join(new_temp_data["Index"])
MetaData=MetaData.loc[~MetaData["Index"].isnull(),:]
if args.Fields != None:
for field in args.Fields:
if field in temp_data.columns:
MetaData=MetaData.join(temp_data[field],lsuffix='_Old')
MetaData=MetaData.set_index("Index").copy()
else:
MetaData=new_temp_data[["Index"]].copy()
if args.Fields != None:
for field in args.Fields:
if field in temp_data.columns:
MetaData=MetaData.join(temp_data[field],lsuffix='_Old')
def isnumber(str):
if str==str:
try:
float(str)
return True
except ValueError:
return False
else:
return False
MetaData.applymap(lambda x: int(float(x)) if isnumber(x) else x).to_csv(os.path.join(args.O,'phyloviz_MetaData.tab'), sep='\t',index=True,float_format='%.0f')
new_temp_data=new_temp_data.set_index("Index").copy()
#new_temp_data=drop(new_temp_data,args.Non_allelic).copy()
new_temp_data.applymap(lambda x: int(float(x)) if isnumber(x) else x).to_csv(os.path.join(args.O, 'phyloviz_Alleles.tab'), sep='\t',index=True,float_format='%.0f')
| gpl-3.0 |
nschmidtALICE/AliPhysics | PWGHF/vertexingHF/macros/AnalyseAODMismatchTree.py | 15 | 5530 | import uproot
import numpy as np
import pandas as pd
from ROOT import TH1F, TH2F, TCanvas, TLegend
from ROOT import kRed, kAzure, gStyle, kIsland
def GetMaskOfBits(bits):
'''
Helper method to get bit mask from bits
Arguments
----------
- list of bits
Returns
----------
- mask corresponding to the input bits
'''
mask = 0
for bit in bits:
mask += 2**bit
return mask
def FilterBitDf(dfToFilter, column, bitsToTest, logic='or'):
'''
Method to apply selection testing one or more bits
Arguments
----------
- pandas dataframe to filter
- colum with bitmap
- list of bits to test
- logic to combine the bits (and, or)
Returns
----------
- filtered pandas dataframe
'''
maskOfBits = GetMaskOfBits(bitsToTest)
flags = dfToFilter[column].astype(int) & maskOfBits
if logic == 'or':
flags = flags.astype('bool')
elif logic == 'and':
flags -= maskOfBits
flags = ~flags.astype('bool')
elif logic == 'not':
flags = ~flags.astype('bool')
else:
print('Error: only and, or, and not logics are supported for bitwise operations')
return None
dfFilt = dfToFilter[flags.values]
return dfFilt
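# --- Illustrative sketch (editor's addition, not called by the script) ---
# Tiny example of the bit helpers above: GetMaskOfBits([0, 2]) == 5, and
# FilterBitDf keeps rows whose status has any ('or') or all ('and') of the
# requested bits set.  The helper name `_filter_bit_demo` is hypothetical.
def _filter_bit_demo():
    demo = pd.DataFrame({'status': [0, 1, 4, 5]})
    assert GetMaskOfBits([0, 2]) == 5
    # 'or': rows with status 1, 4 and 5 have bit 0 or bit 2 set
    assert list(FilterBitDf(demo, 'status', [0, 2], logic='or')['status']) == [1, 4, 5]
    # 'and': only status 5 has both bits set
    assert list(FilterBitDf(demo, 'status', [0, 2], logic='and')['status']) == [5]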
def main():
'''
Main function
'''
prod = 'LHC20g11a'
tree = uproot.open('AnalysisResults.root')['AOD_dAOD_Matching/fTreeMismatch']
df = tree.pandas.df()
df = df.sort_values(by=['file_name'])
pd.set_option('display.max_colwidth', None)
nFiles = len(df)
nEvents = sum(df['n_events'].values)
dfSel = {'good_files': df.query('mismatch_status == 0'),
'mism_ev': FilterBitDf(df, 'mismatch_status', [0]),
'mism_TProcessID': FilterBitDf(df, 'mismatch_status', [1]),
'mism_cand': FilterBitDf(df, 'mismatch_status', [2]),
'mism_ev_and_TProcessID': FilterBitDf(df, 'mismatch_status', [0, 1], logic='and'),
'mism_ev_and_cand': FilterBitDf(df, 'mismatch_status', [0, 2], logic='and'),
'mism_cand_and_TProcessID': FilterBitDf(df, 'mismatch_status', [1, 2], logic='and'),
'mism_all': FilterBitDf(df, 'mismatch_status', [1, 2, 3], logic='and')}
fracFiles, fracEv = {}, {}
for mism in dfSel:
fracFiles[mism] = len(dfSel[mism]) / nFiles
fracEv[mism] = sum(dfSel[mism]['n_events'].values) / nEvents
print(f'\nfraction of files with flag \"{mism}\": {fracFiles[mism]}')
print(f'fraction of events with flag \"{mism}\": {fracEv[mism]}')
gStyle.SetTitleSize(0.045, 'xy')
gStyle.SetLabelSize(0.04, 'xy')
gStyle.SetPadTopMargin(0.035)
gStyle.SetPadRightMargin(0.035)
gStyle.SetPadBottomMargin(0.15)
gStyle.SetPadLeftMargin(0.12)
gStyle.SetPadTickX(1)
gStyle.SetPadTickY(1)
gStyle.SetOptStat(0)
gStyle.SetPalette(kIsland)
hAODMism = TH1F('hAODMism', ';;fraction', 8, 0.5, 8.5)
hAODMism.SetLineWidth(2)
hAODMism.SetLineColor(kRed+1)
hAODMism.GetYaxis().SetRangeUser(1.e-5, 1.)
hEventMism = TH1F('hEventMism', ';;fraction', 8, 0.5, 8.5)
hEventMism.SetLineWidth(2)
hEventMism.SetLineColor(kAzure+4)
hEventMism.GetYaxis().SetRangeUser(1.e-5, 1.)
for iMism, mism in enumerate(dfSel):
hAODMism.GetXaxis().SetBinLabel(iMism+1, mism)
hEventMism.GetXaxis().SetBinLabel(iMism+1, mism)
hAODMism.SetBinContent(iMism+1, fracFiles[mism])
hEventMism.SetBinContent(iMism+1, fracEv[mism])
leg = TLegend(0.6, 0.7, 0.8, 0.9)
leg.SetBorderSize(0)
leg.SetFillStyle(0)
leg.AddEntry(hAODMism, 'AOD files', 'l')
leg.AddEntry(hEventMism, 'events', 'l')
cMismFrac = TCanvas('cMismFrac', '', 1920, 1080)
cMismFrac.SetLogy()
hAODMism.Draw()
hEventMism.Draw('same')
leg.Draw()
cMismFrac.Modified()
cMismFrac.Update()
dfSel['mism_cand'][['file_name']].to_csv(f'AOD_mismatch_{prod}_cand.txt', header=False, index=False)
dfSel['mism_ev'][['file_name']].to_csv(f'AOD_mismatch_{prod}_nevents.txt', header=False, index=False)
dfSel['mism_TProcessID'][['file_name']].to_csv(f'AOD_mismatch_{prod}_TProcessID.txt', header=False, index=False)
cMismFrac.SaveAs(f'AODMismatch_fractions_{prod}.pdf')
# check for files not tested (jobs failed)
runs = np.unique(df['run_number'].values)
nRuns = len(runs)
for iRun, run in enumerate(runs):
dfRunSel = df.query(f'run_number == {run}')
lastProcessedFile = list(dfRunSel['file_name'].values)[-1]
numLastProcFile = int(lastProcessedFile.decode().rpartition('AOD/')[2].rpartition('/')[0])
hFilesTested = TH2F(f'hFilesTested{run}', f'run {run};AOD number;', numLastProcFile, 0.5, numLastProcFile+0.5, 1, 0., 1.)
hFilesTested.GetZaxis().SetRangeUser(-0.001, 1.)
cFilesTested = TCanvas(f'cFilesTested{run}', '', 1920, 1080)
cFilesTested.SetTopMargin(0.12)
cFilesTested.SetRightMargin(0.12)
for fileName in dfRunSel['file_name']:
numProcFile = int(fileName.decode().rpartition('AOD/')[2].rpartition('/')[0])
hFilesTested.Fill(numProcFile, 0.5)
hFilesTested.Draw('colz')
cFilesTested.Modified()
cFilesTested.Update()
if iRun == 0:
cFilesTested.SaveAs(f'FilesTested_{prod}.pdf[')
cFilesTested.SaveAs(f'FilesTested_{prod}.pdf')
if iRun == nRuns-1:
cFilesTested.SaveAs(f'FilesTested_{prod}.pdf]')
input()
# call main function
main() | bsd-3-clause |
alexeyum/scikit-learn | benchmarks/bench_glmnet.py | 111 | 3890 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
This script runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
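# Quick sanity check for rmse (editor's illustration, not used by the benchmark):
# rmse([0, 0], [3, 4]) = sqrt((9 + 16) / 2) = sqrt(12.5) ~= 3.536
def _rmse_example():
    assert abs(rmse(np.array([0., 0.]), np.array([3., 4.])) - np.sqrt(12.5)) < 1e-12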
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of matplotlib.pyplot
import matplotlib.pyplot as plt
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
    n_informative = n_features // 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
plt.clf()
xx = range(0, n * step, step)
plt.title('Lasso regression on sample dataset (%d features)' % n_features)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of samples to classify')
plt.ylabel('Time (s)')
plt.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
plt.figure('scikit-learn vs. glmnet benchmark results')
plt.title('Regression in high dimensional spaces (%d samples)' % n_samples)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
| bsd-3-clause |
Newman101/scipy | scipy/interpolate/fitpack2.py | 8 | 61750 | """
fitpack --- curve and surface fitting with splines
fitpack is based on a collection of Fortran routines DIERCKX
by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
to double routines by Pearu Peterson.
"""
# Created by Pearu Peterson, June,August 2003
from __future__ import division, print_function, absolute_import
__all__ = [
'UnivariateSpline',
'InterpolatedUnivariateSpline',
'LSQUnivariateSpline',
'BivariateSpline',
'LSQBivariateSpline',
'SmoothBivariateSpline',
'LSQSphereBivariateSpline',
'SmoothSphereBivariateSpline',
'RectBivariateSpline',
'RectSphereBivariateSpline']
import warnings
from numpy import zeros, concatenate, alltrue, ravel, all, diff, array, ones
import numpy as np
from . import fitpack
from . import dfitpack
################ Univariate spline ####################
_curfit_messages = {1:"""
The required storage space exceeds the available storage space, as
specified by the parameter nest: nest too small. If nest is already
large (say nest > m/2), it may also indicate that s is too small.
The approximation returned is the weighted least-squares spline
according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
gives the corresponding weighted sum of squared residuals (fp>s).
""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
3:"""
The maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached: s
too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
}
# UnivariateSpline, ext parameter can be an int or a string
_extrap_modes = {0: 0, 'extrapolate': 0,
1: 1, 'zeros': 1,
2: 2, 'raise': 2,
3: 3, 'const': 3}
class UnivariateSpline(object):
"""
One-dimensional smoothing spline fit to a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `s`
specifies the number of knots by specifying a smoothing condition.
Parameters
----------
x : (N,) array_like
1-D array of independent input data. Must be increasing.
y : (N,) array_like
1-D array of dependent input data, of the same length as `x`.
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be <= 5.
Default is k=3, a cubic spline.
s : float or None, optional
Positive smoothing factor used to choose the number of knots. Number
of knots will be increased until the smoothing condition is satisfied::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) <= s
If None (default), ``s = len(w)`` which should be a good value if
``1/w[i]`` is an estimate of the standard deviation of ``y[i]``.
If 0, spline will interpolate through all data points.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
InterpolatedUnivariateSpline : Subclass with smoothing forced to 0
LSQUnivariateSpline : Subclass in which knots are user-selected instead of
being set by smoothing condition
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
**NaN handling**: If the input arrays contain ``nan`` values, the result
is not useful, since the underlying spline fitting routines cannot deal
with ``nan`` . A workaround is to use zero weights for not-a-number
data points:
>>> from scipy.interpolate import UnivariateSpline
>>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4])
>>> w = np.isnan(y)
>>> y[w] = 0.
>>> spl = UnivariateSpline(x, y, w=~w)
Notice the need to replace a ``nan`` by a numerical value (precise value
does not matter as long as the corresponding weight is zero.)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> plt.plot(x, y, 'ro', ms=5)
Use the default value for the smoothing parameter:
>>> spl = UnivariateSpline(x, y)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3)
Manually change the amount of smoothing:
>>> spl.set_smoothing_factor(0.5)
>>> plt.plot(xs, spl(xs), 'b', lw=3)
>>> plt.show()
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None,
ext=0, check_finite=False):
if check_finite:
w_finite = np.isfinite(w).all() if w is not None else True
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not w_finite):
raise ValueError("x and y array must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=s)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
@classmethod
def _from_tck(cls, tck, ext=0):
"""Construct a spline object from given tck"""
self = cls.__new__(cls)
t, c, k = tck
self._eval_args = tck
#_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = (None,None,None,None,None,k,None,len(t),t,
c,None,None,None,None)
self.ext = ext
return self
def _reset_class(self):
data = self._data
n,t,c,k,ier = data[7],data[8],data[9],data[5],data[-1]
self._eval_args = t[:n],c[:n],k
if ier == 0:
# the spline returned has a residual sum of squares fp
# such that abs(fp-s)/s <= tol with tol a relative
# tolerance set to 0.001 by the program
pass
elif ier == -1:
# the spline returned is an interpolating spline
self._set_class(InterpolatedUnivariateSpline)
elif ier == -2:
# the spline returned is the weighted least-squares
# polynomial of degree k. In this extreme case fp gives
# the upper bound fp0 for the smoothing factor s.
self._set_class(LSQUnivariateSpline)
else:
# error
if ier == 1:
self._set_class(LSQUnivariateSpline)
message = _curfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
def _set_class(self, cls):
self._spline_class = cls
if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
LSQUnivariateSpline):
self.__class__ = cls
else:
# It's an unknown subclass -- don't change class. cf. #731
pass
def _reset_nest(self, data, nest=None):
n = data[10]
if nest is None:
k,m = data[5],len(data[0])
nest = m+k+1 # this is the maximum bound for nest
else:
if not n <= nest:
raise ValueError("`nest` can only be increased")
t, c, fpint, nrdata = [np.resize(data[j], nest) for j in [8,9,11,12]]
args = data[:8] + (t,c,n,fpint,nrdata,data[13])
data = dfitpack.fpcurf1(*args)
return data
def set_smoothing_factor(self, s):
""" Continue spline computation with the given smoothing
factor s and with the knots found at the last call.
This routine modifies the spline in place.
"""
data = self._data
if data[6] == -1:
            warnings.warn('smoothing factor unchanged for '
                          'LSQ spline with fixed knots')
return
args = data[:6] + (s,) + data[7:]
data = dfitpack.fpcurf1(*args)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
def __call__(self, x, nu=0, ext=None):
"""
Evaluate spline (or its nu-th derivative) at positions x.
Parameters
----------
x : array_like
A 1-D array of points at which to return the value of the smoothed
spline or its derivatives. Note: x can be unordered but the
evaluation is more efficient if x is (partially) ordered.
nu : int
The order of derivative of the spline to compute.
ext : int
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0, passed from the initialization of
UnivariateSpline.
"""
x = np.asarray(x)
# empty input yields empty output
if x.size == 0:
return array([])
# if nu is None:
# return dfitpack.splev(*(self._eval_args+(x,)))
# return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))
if ext is None:
ext = self.ext
else:
try:
ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
return fitpack.splev(x, self._eval_args, der=nu, ext=ext)
def get_knots(self):
""" Return positions of interior knots of the spline.
Internally, the knot vector contains ``2*k`` additional boundary knots.
"""
data = self._data
k,n = data[5],data[7]
return data[8][k:n-k]
def get_coeffs(self):
"""Return spline coefficients."""
data = self._data
k,n = data[5],data[7]
return data[9][:n-k-1]
def get_residual(self):
"""Return weighted sum of squared residuals of the spline approximation.
This is equivalent to::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)
"""
return self._data[10]
def integral(self, a, b):
""" Return definite integral of the spline between two given points.
Parameters
----------
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
integral : float
The value of the definite integral of the spline between limits.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.integral(0, 3)
9.0
which agrees with :math:`\int x^2 dx = x^3 / 3` between the limits
of 0 and 3.
A caveat is that this routine assumes the spline to be zero outside of
the data limits:
>>> spl.integral(-1, 4)
9.0
>>> spl.integral(-1, 0)
0.0
"""
return dfitpack.splint(*(self._eval_args+(a,b)))
def derivatives(self, x):
""" Return all derivatives of the spline at the point x.
Parameters
----------
x : float
The point to evaluate the derivatives at.
Returns
-------
der : ndarray, shape(k+1,)
Derivatives of the orders 0 to k.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.derivatives(1.5)
array([2.25, 3.0, 2.0, 0])
"""
d,ier = dfitpack.spalde(*(self._eval_args+(x,)))
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return d
def roots(self):
""" Return the zeros of the spline.
Restriction: only cubic splines are supported by fitpack.
"""
k = self._data[5]
if k == 3:
z,m,ier = dfitpack.sproot(*self._eval_args[:2])
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return z[:m]
raise NotImplementedError('finding roots unsupported for '
'non-cubic splines')
def derivative(self, n=1):
"""
Construct a new spline representing the derivative of this spline.
Parameters
----------
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k-n representing the derivative of this
spline.
See Also
--------
splder, antiderivative
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = UnivariateSpline(x, y, k=4, s=0)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> spl.derivative().roots() / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\pi/2 + n\pi` of `cos(x) = sin'(x)`.
"""
tck = fitpack.splder(self._eval_args, n)
return UnivariateSpline._from_tck(tck, self.ext)
def antiderivative(self, n=1):
"""
Construct a new spline representing the antiderivative of this spline.
Parameters
----------
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k+n representing the antiderivative of this
spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, derivative
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = UnivariateSpline(x, y, s=0)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> spl(1.7), spl.antiderivative().derivative()(1.7)
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = spl.antiderivative()
>>> ispl(np.pi/2) - ispl(0)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
tck = fitpack.splantider(self._eval_args, n)
return UnivariateSpline._from_tck(tck, self.ext)
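# --- Illustrative sketch (editor's addition, not part of scipy's API) ---
# Uncalled helper demonstrating the `ext` extrapolation modes documented in
# the UnivariateSpline docstring for points outside the data interval.
# The helper name `_ext_modes_demo` is hypothetical.
def _ext_modes_demo():
    x = np.linspace(0., 1., 10)
    spl = UnivariateSpline(x, 2. * x, k=1, s=0)
    assert float(spl(2.0, ext='extrapolate')) != 0.0            # ext=0
    assert float(spl(2.0, ext='zeros')) == 0.0                  # ext=1
    assert abs(float(spl(2.0, ext='const')) - float(spl(1.0))) < 1e-12  # ext=3
    try:
        spl(2.0, ext='raise')                                   # ext=2
    except ValueError:
        pass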
class InterpolatedUnivariateSpline(UnivariateSpline):
"""
One-dimensional interpolating spline for a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. Spline
function passes through all provided points. Equivalent to
`UnivariateSpline` with s=0.
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
input dimension of data points
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
UnivariateSpline : Superclass -- allows knots to be selected by a
smoothing condition
LSQUnivariateSpline : spline for which knots are user-selected
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import InterpolatedUnivariateSpline
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> spl = InterpolatedUnivariateSpline(x, y)
>>> plt.plot(x, y, 'ro', ms=5)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)
>>> plt.show()
Notice that the ``spl(x)`` interpolates `y`:
>>> spl.get_residual()
0.0
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
if check_finite:
w_finite = np.isfinite(w).all() if w is not None else True
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not w_finite):
raise ValueError("Input must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=0)
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
_fpchec_error_string = """The input parameters have been rejected by fpchec. \
This means that at least one of the following conditions is violated:
1) k+1 <= n-k-1 <= m
2) t(1) <= t(2) <= ... <= t(k+1)
t(n-k) <= t(n-k+1) <= ... <= t(n)
3) t(k+1) < t(k+2) < ... < t(n-k)
4) t(k+1) <= x(i) <= t(n-k)
5) The conditions specified by Schoenberg and Whitney must hold
for at least one subset of data points, i.e., there must be a
subset of data points y(j) such that
t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
"""
class LSQUnivariateSpline(UnivariateSpline):
"""
One-dimensional spline with explicit internal knots.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t`
specifies the internal knots of the spline
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
Input dimension of data points
t : (M,) array_like
interior knots of the spline. Must be in ascending order and::
bbox[0] < t[0] < ... < t[-1] < bbox[-1]
w : (N,) array_like, optional
weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox = [x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
Default is k=3, a cubic spline.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
Raises
------
ValueError
If the interior knots do not satisfy the Schoenberg-Whitney conditions
See Also
--------
UnivariateSpline : Superclass -- knots are specified by setting a
smoothing condition
InterpolatedUnivariateSpline : spline passing through all points
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
Examples
--------
>>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
Fit a smoothing spline with a pre-defined internal knots:
>>> t = [-1, 0, 1]
>>> spl = LSQUnivariateSpline(x, y, t)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(x, y, 'ro', ms=5)
>>> plt.plot(xs, spl(xs), 'g-', lw=3)
>>> plt.show()
Check the knot vector:
>>> spl.get_knots()
array([-3., -1., 0., 1., 3.])
Constructing lsq spline using the knots from another spline:
>>> x = np.arange(10)
>>> s = UnivariateSpline(x, x, s=0)
>>> s.get_knots()
array([ 0., 2., 3., 4., 5., 6., 7., 9.])
>>> knt = s.get_knots()
>>> s1 = LSQUnivariateSpline(x, x, knt[1:-1]) # Chop 1st and last knot
>>> s1.get_knots()
array([ 0., 2., 3., 4., 5., 6., 7., 9.])
"""
def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
if check_finite:
w_finite = np.isfinite(w).all() if w is not None else True
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not w_finite or not np.isfinite(t).all()):
raise ValueError("Input(s) must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
xb = bbox[0]
xe = bbox[1]
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
n = len(t)
if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
raise ValueError('Interior knots t must satisfy '
'Schoenberg-Whitney conditions')
if not dfitpack.fpchec(x, t, k) == 0:
raise ValueError(_fpchec_error_string)
data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
self._data = data[:-3] + (None, None, data[-1])
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
################ Bivariate spline ####################
class _BivariateSplineBase(object):
""" Base class for Bivariate spline s(x,y) interpolation on the rectangle
[xb,xe] x [yb, ye] calculated from a given set of data points
(x,y,z).
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
BivariateSpline :
implementation of bivariate spline interpolation on a plane grid
SphereBivariateSpline :
implementation of bivariate spline interpolation on a spherical grid
"""
def get_residual(self):
""" Return weighted sum of squared residuals of the spline
approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
"""
return self.fp
def get_knots(self):
""" Return a tuple (tx,ty) where tx,ty contain knots positions
of the spline with respect to x-, y-variable, respectively.
The position of interior and additional knots are given as
t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
"""
return self.tck[:2]
def get_coeffs(self):
""" Return spline coefficients."""
return self.tck[2]
def __call__(self, x, y, mth=None, dx=0, dy=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
x, y : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points ``(x[i],
y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting
is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays x, y. The arrays must be
sorted to increasing order.
dx : int
Order of x-derivative
.. versionadded:: 0.14.0
dy : int
Order of y-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
mth : str
Deprecated argument. Has no effect.
"""
x = np.asarray(x)
y = np.asarray(y)
if mth is not None:
warnings.warn("The `mth` argument is deprecated and will be removed",
FutureWarning)
tx, ty, c = self.tck[:3]
kx, ky = self.degrees
if grid:
if x.size == 0 or y.size == 0:
return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.parder(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by parder: %s" % ier)
else:
z,ier = dfitpack.bispev(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispev: %s" % ier)
else:
# standard Numpy broadcasting
if x.shape != y.shape:
x, y = np.broadcast_arrays(x, y)
shape = x.shape
x = x.ravel()
y = y.ravel()
if x.size == 0 or y.size == 0:
return np.zeros(shape, dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.pardeu(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by pardeu: %s" % ier)
else:
z,ier = dfitpack.bispeu(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispeu: %s" % ier)
z = z.reshape(shape)
return z
_surfit_messages = {1:"""
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
3:"""
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
4:"""
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
5:"""
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
-3:"""
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
}
class BivariateSpline(_BivariateSplineBase):
"""
Base class for bivariate splines.
This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
of data points ``(x, y, z)``.
This class is meant to be subclassed, not instantiated directly.
To construct these splines, call either `SmoothBivariateSpline` or
`LSQBivariateSpline`.
See Also
--------
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline :
to create a BivariateSpline through the given points
LSQBivariateSpline :
to create a BivariateSpline using weighted least-squares fitting
SphereBivariateSpline :
        bivariate spline interpolation in spherical coordinates
bisplrep : older wrapping of FITPACK
bisplev : older wrapping of FITPACK
"""
@classmethod
def _from_tck(cls, tck):
"""Construct a spline object from given tck and degree"""
self = cls.__new__(cls)
if len(tck) != 5:
raise ValueError("tck should be a 5 element tuple of tx, ty, c, kx, ky")
self.tck = tck[:3]
self.degrees = tck[3:]
return self
def ev(self, xi, yi, dx=0, dy=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(xi[i], yi[i]),
i=0,...,len(xi)-1``.
Parameters
----------
xi, yi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dx : int, optional
Order of x-derivative
.. versionadded:: 0.14.0
dy : int, optional
Order of y-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)
def integral(self, xa, xb, ya, yb):
"""
Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
Returns
-------
integ : float
The value of the resulting integral.
"""
tx,ty,c = self.tck[:3]
kx,ky = self.degrees
return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)
class SmoothBivariateSpline(BivariateSpline):
"""
Smooth bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
w : array_like, optional
Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``z[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
    LSQBivariateSpline : to create a BivariateSpline using weighted
        least-squares fitting
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
"""
def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
eps=None):
xb,xe,yb,ye = bbox
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=1)
if ier > 10: # lwrk2 was to small, re-run
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
message = _surfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx[:nx],ty[:ny],c[:(nx-kx-1)*(ny-ky-1)]
self.degrees = kx,ky
class LSQBivariateSpline(BivariateSpline):
"""
Weighted least-squares bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
tx, ty : array_like
Strictly ordered 1-D sequences of knots coordinates.
w : array_like, optional
Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
bbox : (4,) array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline : create a smoothing BivariateSpline
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
"""
def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
eps=None):
nx = 2*kx+2+len(tx)
ny = 2*ky+2+len(ty)
tx1 = zeros((nx,),float)
ty1 = zeros((ny,),float)
tx1[kx+1:nx-kx-1] = tx
ty1[ky+1:ny-ky-1] = ty
xb,xe,yb,ye = bbox
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=1)
if ier > 10:
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
if ier < -2:
deficiency = (nx-kx-1)*(ny-ky-1)+ier
message = _surfit_messages.get(-3) % (deficiency)
else:
message = _surfit_messages.get(ier, 'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx1, ty1, c
self.degrees = kx, ky
class RectBivariateSpline(BivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh.
Can be used for both smoothing and interpolating data.
Parameters
----------
x,y : array_like
1-D arrays of coordinates in strictly ascending order.
z : array_like
2-D array of data with shape (x.size,y.size).
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default is ``s=0``, which is for interpolation.
See Also
--------
SmoothBivariateSpline : a smoothing bivariate spline for scattered data
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
"""
def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
x, y = ravel(x), ravel(y)
if not all(diff(x) > 0.0):
raise TypeError('x must be strictly increasing')
if not all(diff(y) > 0.0):
raise TypeError('y must be strictly increasing')
if not ((x.min() == x[0]) and (x.max() == x[-1])):
raise TypeError('x must be strictly ascending')
if not ((y.min() == y[0]) and (y.max() == y[-1])):
raise TypeError('y must be strictly ascending')
if not x.size == z.shape[0]:
raise TypeError('x dimension of z must have same number of '
'elements as x')
if not y.size == z.shape[1]:
raise TypeError('y dimension of z must have same number of '
'elements as y')
z = ravel(z)
xb, xe, yb, ye = bbox
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
ye, kx, ky, s)
if ier not in [0, -1, -2]:
msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
self.degrees = kx, ky
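# --- Illustrative sketch (editor's addition, not part of scipy's API) ---
# Uncalled helper showing the two evaluation modes documented in
# _BivariateSplineBase.__call__ (grid=True vs grid=False) and the
# BivariateSpline.integral method, using RectBivariateSpline on z = x*y,
# whose exact integral over [0, 1] x [0, 1] is 0.25.
# The helper name `_rect_bivariate_demo` is hypothetical.
def _rect_bivariate_demo():
    x = np.linspace(0., 4., 5)
    y = np.linspace(0., 3., 4)
    z = x[:, None] * y[None, :]
    spl = RectBivariateSpline(x, y, z)
    # grid=True evaluates on the outer product of the query points
    assert spl(np.array([1., 1.5, 2.]), np.array([1., 1.5])).shape == (3, 2)
    # grid=False evaluates pointwise at (x[i], y[i])
    assert spl(np.array([1., 1.5, 2.]),
               np.array([1., 1.5, 2.]), grid=False).shape == (3,)
    assert abs(spl.integral(0., 1., 0., 1.) - 0.25) < 1e-8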
_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
restrictions must be satisfied:
-1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
kwrk >= m+(ntest-7)*(npest-7)
if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
0<tt(5)<tt(6)<...<tt(nt-4)<pi
0<tp(5)<tp(6)<...<tp(np-4)<2*pi
if iopt>=0: s>=0
if one of these conditions is found to be violated,control
is immediately repassed to the calling program. in that
case there is no approximation returned."""
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank
deficient system (deficiency=%i, rank=%i). Especially if the rank
deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
the results may be inaccurate. They could also seriously depend on
the value of eps."""
class SphereBivariateSpline(_BivariateSplineBase):
"""
    Bivariate spline s(x,y) of degree 3 on a sphere, calculated from a
given set of data points (theta,phi,r).
.. versionadded:: 0.11.0
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
    SmoothSphereBivariateSpline :
        to create a BivariateSpline through the given points on a sphere
    LSQSphereBivariateSpline :
        to create a BivariateSpline using weighted least-squares fitting
"""
def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
theta, phi : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points
``(theta[i], phi[i]), i=0, ..., len(x)-1``. Standard
Numpy broadcasting is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays theta, phi. The arrays
must be sorted to increasing order.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int
Order of phi-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
"""
theta = np.asarray(theta)
phi = np.asarray(phi)
if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
raise ValueError("requested theta out of bounds.")
if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
raise ValueError("requested phi out of bounds.")
return _BivariateSplineBase.__call__(self, theta, phi,
dx=dtheta, dy=dphi, grid=grid)
def ev(self, theta, phi, dtheta=0, dphi=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(theta[i], phi[i]),
i=0,...,len(theta)-1``.
Parameters
----------
theta, phi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int, optional
Order of phi-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
class SmoothSphereBivariateSpline(SphereBivariateSpline):
"""
Smooth bivariate spline approximation in spherical coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
w : array_like, optional
Positive 1-D sequence of weights.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if 1/w[i] is an
estimate of the standard deviation of r[i].
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object
>>> lats, lons = np.meshgrid(theta, phi)
>>> from scipy.interpolate import SmoothSphereBivariateSpline
>>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), s=3.5)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2 * np.pi, 90)
>>> data_smth = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_smth, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
        if isinstance(w, float):
w = ones(len(theta)) * w
nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
r, w=w, s=s,
eps=eps)
if ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
self.degrees = (3, 3)
class LSQSphereBivariateSpline(SphereBivariateSpline):
"""
Weighted least-squares bivariate spline approximation in spherical
coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
tt, tp : array_like
Strictly ordered 1-D sequences of knots coordinates.
Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
w : array_like, optional
Positive 1-D sequence of weights, of the same length as `theta`, `phi`
and `r`.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object. Here, we must also specify the
coordinates of the knots to use.
>>> lats, lons = np.meshgrid(theta, phi)
>>> knotst, knotsp = theta.copy(), phi.copy()
>>> knotst[0] += .0001
>>> knotst[-1] -= .0001
>>> knotsp[0] += .0001
>>> knotsp[-1] -= .0001
>>> from scipy.interpolate import LSQSphereBivariateSpline
>>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), knotst, knotsp)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2*np.pi, 90)
>>> data_lsq = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_lsq, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
if isinstance(w, float):  # broadcast a scalar weight to all points (np.issubclass_ never matched an instance)
w = ones(len(theta)) * w
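# pad the user-supplied interior knots to full cubic-spline knot vectors:
# the 4 leading knots stay at 0 (from the zeros() initialisation) and the
# 4 trailing knots are pinned to pi (theta) and 2*pi (phi)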
nt_, np_ = 8 + len(tt), 8 + len(tp)
tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
tt_[4:-4], tp_[4:-4] = tt, tp
tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
w=w, eps=eps)
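# ier < -2 signals a rank-deficient least-squares system; the fit is still
# usable, so only warn and report the estimated rank deficiency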
if ier < -2:
deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier
message = _spherefit_messages.get(-3) % (deficiency, -ier)
warnings.warn(message)
elif ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_, tp_, c
self.degrees = (3, 3)
_spfit_messages = _surfit_messages.copy()
_spfit_messages[10] = """
ERROR: on entry, the input data are checked for validity;
the following restrictions must be satisfied.
-1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
-1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
-1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
kwrk>=5+mu+mv+nuest+nvest,
lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
0< u(i-1)<u(i)< pi,i=2,..,mu,
-pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
0<tu(5)<tu(6)<...<tu(nu-4)< pi
8<=nv<=min(nvest,mv+7)
v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
the schoenberg-whitney conditions, i.e. there must be
subset of grid co-ordinates uu(p) and vv(q) such that
tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
(iopt(2)=1 and iopt(3)=1 also count for a uu-value
tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
(vv(q) is either a value v(j) or v(j)+2*pi)
if iopt(1)>=0: s>=0
if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
if one of these conditions is found to be violated,control is
immediately repassed to the calling program. in that case there is no
approximation returned."""
class RectSphereBivariateSpline(SphereBivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh on a sphere.
Can be used for smoothing data.
.. versionadded:: 0.11.0
Parameters
----------
u : array_like
1-D array of latitude coordinates in strictly ascending order.
Coordinates must be given in radians and lie within the interval
(0, pi).
v : array_like
1-D array of longitude coordinates in strictly ascending order.
Coordinates must be given in radians, and must lie within (0, 2pi).
r : array_like
2-D array of data with shape ``(u.size, v.size)``.
s : float, optional
Positive smoothing factor defined for estimation condition
(``s=0`` is for interpolation).
pole_continuity : bool or (bool, bool), optional
Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
will be 1 or 0 when this is True or False, respectively.
Defaults to False.
pole_values : float or (float, float), optional
Data values at the poles ``u=0`` and ``u=pi``. Either the whole
parameter or each individual element can be None. Defaults to None.
pole_exact : bool or (bool, bool), optional
Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
value is considered to be the right function value, and it will be
fitted exactly. If False, the value will be considered to be a data
value just like the other data values. Defaults to False.
pole_flat : bool or (bool, bool), optional
For the poles at ``u=0`` and ``u=pi``, specify whether or not the
approximation has vanishing derivatives. Defaults to False.
See Also
--------
RectBivariateSpline : bivariate spline approximation over a rectangular
mesh
Notes
-----
Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
``iopt[0] = 1`` in the FITPACK routine) is supported. The exact
least-squares spline approximation is not implemented yet.
When actually performing the interpolation, the requested `v` values must
lie within the same length 2pi interval that the original `v` values were
chosen from.
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/spgrid.f
Examples
--------
Suppose we have global data on a coarse grid
>>> lats = np.linspace(10, 170, 9) * np.pi / 180.
>>> lons = np.linspace(0, 350, 18) * np.pi / 180.
>>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
... np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
We want to interpolate it to a global one-degree grid
>>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
>>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
>>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)
We need to set up the interpolator object
>>> from scipy.interpolate import RectSphereBivariateSpline
>>> lut = RectSphereBivariateSpline(lats, lons, data)
Finally we interpolate the data. The `RectSphereBivariateSpline` object
only takes 1-D arrays as input, therefore we need to do some reshaping.
>>> data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
Looking at the original and the interpolated data, one can see that the
interpolant reproduces the original data very well:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(212)
>>> ax2.imshow(data_interp, interpolation='nearest')
>>> plt.show()
Choosing the optimal value of ``s`` can be a delicate task. Recommended
values for ``s`` depend on the accuracy of the data values. If the user
has an idea of the statistical errors on the data, a proper estimate for
``s`` can be derived: assuming that, for the right ``s``, the
interpolator uses a spline ``f(u,v)`` which exactly reproduces the
function underlying the data, ``sum((r(i,j)-f(u(i),v(j)))**2)`` gives a
good estimate for this ``s``. For example, if the statistical errors on
the ``r(i,j)`` values are not greater than 0.1, a good ``s`` should not
be larger than ``u.size * v.size * (0.1)**2``.
If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
be determined by trial and error. The best is then to start with a very
large value of ``s`` (to determine the least-squares polynomial and the
corresponding upper bound ``fp0`` for ``s``) and then to progressively
decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation
shows more detail) to obtain closer fits.
The interpolation results for different values of ``s`` give some insight
into this process:
>>> fig2 = plt.figure()
>>> s = [3e9, 2e9, 1e9, 1e8]
>>> for ii in xrange(len(s)):
... lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
... data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
... ax = fig2.add_subplot(2, 2, ii+1)
... ax.imshow(data_interp, interpolation='nearest')
... ax.set_title("s = %g" % s[ii])
>>> plt.show()
"""
def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
pole_exact=False, pole_flat=False):
iopt = np.array([0, 0, 0], dtype=int)
ider = np.array([-1, 0, -1, 0], dtype=int)
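# iopt[0] = 0 selects the smoothing-spline branch of FITPACK's spgrid.f;
# iopt[1:] will hold the requested order of continuity at the poles u=0
# and u=pi. ider encodes, for each pole, whether a data value is supplied
# (and whether it is exact) and whether the derivatives vanish there.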
if pole_values is None:
pole_values = (None, None)
elif isinstance(pole_values, (float, np.float32, np.float64)):
pole_values = (pole_values, pole_values)
if isinstance(pole_continuity, bool):
pole_continuity = (pole_continuity, pole_continuity)
if isinstance(pole_exact, bool):
pole_exact = (pole_exact, pole_exact)
if isinstance(pole_flat, bool):
pole_flat = (pole_flat, pole_flat)
r0, r1 = pole_values
iopt[1:] = pole_continuity
if r0 is None:
ider[0] = -1
else:
ider[0] = pole_exact[0]
if r1 is None:
ider[2] = -1
else:
ider[2] = pole_exact[1]
ider[1], ider[3] = pole_flat
u, v = np.ravel(u), np.ravel(v)
if not np.all(np.diff(u) > 0.0):
raise TypeError('u must be strictly increasing')
if not np.all(np.diff(v) > 0.0):
raise TypeError('v must be strictly increasing')
if not u.size == r.shape[0]:
raise TypeError('u dimension of r must have same number of '
'elements as u')
if not v.size == r.shape[1]:
raise TypeError('v dimension of r must have same number of '
'elements as v')
if pole_continuity[1] is False and pole_flat[1] is True:
raise TypeError('if pole_continuity is False, so must be '
'pole_flat')
if pole_continuity[0] is False and pole_flat[0] is True:
raise TypeError('if pole_continuity is False, so must be '
'pole_flat')
r = np.ravel(r)
nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
u.copy(), v.copy(), r.copy(), r0, r1, s)
if ier not in [0, -1, -2]:
msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
self.degrees = (3, 3)
| bsd-3-clause |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/single_feature/best_kNN_PCA/test11_cross_validate_categories_mov_fixed_1200ms_scaled_method_v_area.py | 1 | 5019 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 41:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
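# 140 projected samples: 28 object categories x 5 trials each, labelled
# 'Fixed' or 'Movable'; the chunk names identify the object so that trials
# of the same object never appear in both the training and the test split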
PCA_data = np.array(Y)
PCA_label_1 = ['Fixed']*35 + ['Movable']*35 + ['Fixed']*35 + ['Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
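# k-NN classifier (k = num2) evaluated with leave-one-chunk-out
# cross-validation; return the cross-validated accuracy in percent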
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original[41:82,:]
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
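# cumulative fraction of variance explained by the leading principal
# components (computed for inspection only; not used below)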
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
| mit |
gfyoung/pandas | pandas/tests/indexes/interval/test_constructors.py | 3 | 17651 | from functools import partial
import numpy as np
import pytest
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas import (
Categorical,
CategoricalIndex,
Float64Index,
Index,
Int64Index,
Interval,
IntervalIndex,
date_range,
notna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
import pandas.core.common as com
@pytest.fixture(params=[None, "foo"])
def name(request):
return request.param
class ConstructorTests:
"""
Common tests for all variations of IntervalIndex construction. Input data
to be supplied in breaks format, then converted by the subclass method
get_kwargs_from_breaks to the expected format.
"""
@pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning")
@pytest.mark.parametrize(
"breaks",
[
[3, 14, 15, 92, 653],
np.arange(10, dtype="int64"),
Int64Index(range(-10, 11)),
Float64Index(np.arange(20, 30, 0.5)),
date_range("20180101", periods=10),
date_range("20180101", periods=10, tz="US/Eastern"),
timedelta_range("1 day", periods=10),
],
)
def test_constructor(self, constructor, breaks, closed, name):
result_kwargs = self.get_kwargs_from_breaks(breaks, closed)
result = constructor(closed=closed, name=name, **result_kwargs)
assert result.closed == closed
assert result.name == name
assert result.dtype.subtype == getattr(breaks, "dtype", "int64")
tm.assert_index_equal(result.left, Index(breaks[:-1]))
tm.assert_index_equal(result.right, Index(breaks[1:]))
@pytest.mark.parametrize(
"breaks, subtype",
[
(Int64Index([0, 1, 2, 3, 4]), "float64"),
(Int64Index([0, 1, 2, 3, 4]), "datetime64[ns]"),
(Int64Index([0, 1, 2, 3, 4]), "timedelta64[ns]"),
(Float64Index([0, 1, 2, 3, 4]), "int64"),
(date_range("2017-01-01", periods=5), "int64"),
(timedelta_range("1 day", periods=5), "int64"),
],
)
def test_constructor_dtype(self, constructor, breaks, subtype):
# GH 19262: conversion via dtype parameter
warn = None
if subtype == "int64" and breaks.dtype.kind in ["M", "m"]:
# astype(int64) deprecated
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
expected_kwargs = self.get_kwargs_from_breaks(breaks.astype(subtype))
expected = constructor(**expected_kwargs)
result_kwargs = self.get_kwargs_from_breaks(breaks)
iv_dtype = IntervalDtype(subtype, "right")
for dtype in (iv_dtype, str(iv_dtype)):
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = constructor(dtype=dtype, **result_kwargs)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"breaks",
[
Int64Index([0, 1, 2, 3, 4]),
Int64Index([0, 1, 2, 3, 4]),
Int64Index([0, 1, 2, 3, 4]),
Float64Index([0, 1, 2, 3, 4]),
date_range("2017-01-01", periods=5),
timedelta_range("1 day", periods=5),
],
)
def test_constructor_pass_closed(self, constructor, breaks):
# not passing closed to IntervalDtype, but to IntervalArray constructor
warn = None
if isinstance(constructor, partial) and constructor.func is Index:
# passing kwargs to Index is deprecated
warn = FutureWarning
iv_dtype = IntervalDtype(breaks.dtype)
result_kwargs = self.get_kwargs_from_breaks(breaks)
for dtype in (iv_dtype, str(iv_dtype)):
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = constructor(dtype=dtype, closed="left", **result_kwargs)
assert result.dtype.closed == "left"
@pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning")
@pytest.mark.parametrize("breaks", [[np.nan] * 2, [np.nan] * 4, [np.nan] * 50])
def test_constructor_nan(self, constructor, breaks, closed):
# GH 18421
result_kwargs = self.get_kwargs_from_breaks(breaks)
result = constructor(closed=closed, **result_kwargs)
expected_subtype = np.float64
expected_values = np.array(breaks[:-1], dtype=object)
assert result.closed == closed
assert result.dtype.subtype == expected_subtype
tm.assert_numpy_array_equal(np.array(result), expected_values)
@pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning")
@pytest.mark.parametrize(
"breaks",
[
[],
np.array([], dtype="int64"),
np.array([], dtype="float64"),
np.array([], dtype="datetime64[ns]"),
np.array([], dtype="timedelta64[ns]"),
],
)
def test_constructor_empty(self, constructor, breaks, closed):
# GH 18421
result_kwargs = self.get_kwargs_from_breaks(breaks)
result = constructor(closed=closed, **result_kwargs)
expected_values = np.array([], dtype=object)
expected_subtype = getattr(breaks, "dtype", np.int64)
assert result.empty
assert result.closed == closed
assert result.dtype.subtype == expected_subtype
tm.assert_numpy_array_equal(np.array(result), expected_values)
@pytest.mark.parametrize(
"breaks",
[
tuple("0123456789"),
list("abcdefghij"),
np.array(list("abcdefghij"), dtype=object),
np.array(list("abcdefghij"), dtype="<U1"),
],
)
def test_constructor_string(self, constructor, breaks):
# GH 19016
msg = (
"category, object, and string subtypes are not supported "
"for IntervalIndex"
)
with pytest.raises(TypeError, match=msg):
constructor(**self.get_kwargs_from_breaks(breaks))
@pytest.mark.parametrize("cat_constructor", [Categorical, CategoricalIndex])
def test_constructor_categorical_valid(self, constructor, cat_constructor):
# GH 21243/21253
if isinstance(constructor, partial) and constructor.func is Index:
# Index is defined to create CategoricalIndex from categorical data
pytest.skip()
breaks = np.arange(10, dtype="int64")
expected = IntervalIndex.from_breaks(breaks)
cat_breaks = cat_constructor(breaks)
result_kwargs = self.get_kwargs_from_breaks(cat_breaks)
result = constructor(**result_kwargs)
tm.assert_index_equal(result, expected)
def test_generic_errors(self, constructor):
# filler input data to be used when supplying invalid kwargs
filler = self.get_kwargs_from_breaks(range(10))
# invalid closed
msg = "closed must be one of 'right', 'left', 'both', 'neither'"
with pytest.raises(ValueError, match=msg):
constructor(closed="invalid", **filler)
# unsupported dtype
msg = "dtype must be an IntervalDtype, got int64"
with pytest.raises(TypeError, match=msg):
constructor(dtype="int64", **filler)
# invalid dtype
msg = "data type [\"']invalid[\"'] not understood"
with pytest.raises(TypeError, match=msg):
constructor(dtype="invalid", **filler)
# no point in nesting periods in an IntervalIndex
periods = period_range("2000-01-01", periods=10)
periods_kwargs = self.get_kwargs_from_breaks(periods)
msg = "Period dtypes are not supported, use a PeriodIndex instead"
with pytest.raises(ValueError, match=msg):
constructor(**periods_kwargs)
# decreasing values
decreasing_kwargs = self.get_kwargs_from_breaks(range(10, -1, -1))
msg = "left side of interval must be <= right side"
with pytest.raises(ValueError, match=msg):
constructor(**decreasing_kwargs)
class TestFromArrays(ConstructorTests):
"""Tests specific to IntervalIndex.from_arrays"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_arrays
def get_kwargs_from_breaks(self, breaks, closed="right"):
"""
converts intervals in breaks format to a dictionary of kwargs specific
to the format expected by IntervalIndex.from_arrays
"""
return {"left": breaks[:-1], "right": breaks[1:]}
def test_constructor_errors(self):
# GH 19016: categorical data
data = Categorical(list("01234abcde"), ordered=True)
msg = (
"category, object, and string subtypes are not supported "
"for IntervalIndex"
)
with pytest.raises(TypeError, match=msg):
IntervalIndex.from_arrays(data[:-1], data[1:])
# unequal length
left = [0, 1, 2]
right = [2, 3]
msg = "left and right must have the same length"
with pytest.raises(ValueError, match=msg):
IntervalIndex.from_arrays(left, right)
@pytest.mark.parametrize(
"left_subtype, right_subtype", [(np.int64, np.float64), (np.float64, np.int64)]
)
def test_mixed_float_int(self, left_subtype, right_subtype):
"""mixed int/float left/right results in float for both sides"""
left = np.arange(9, dtype=left_subtype)
right = np.arange(1, 10, dtype=right_subtype)
result = IntervalIndex.from_arrays(left, right)
expected_left = Float64Index(left)
expected_right = Float64Index(right)
expected_subtype = np.float64
tm.assert_index_equal(result.left, expected_left)
tm.assert_index_equal(result.right, expected_right)
assert result.dtype.subtype == expected_subtype
class TestFromBreaks(ConstructorTests):
"""Tests specific to IntervalIndex.from_breaks"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_breaks
def get_kwargs_from_breaks(self, breaks, closed="right"):
"""
converts intervals in breaks format to a dictionary of kwargs specific
to the format expected by IntervalIndex.from_breaks
"""
return {"breaks": breaks}
def test_constructor_errors(self):
# GH 19016: categorical data
data = Categorical(list("01234abcde"), ordered=True)
msg = (
"category, object, and string subtypes are not supported "
"for IntervalIndex"
)
with pytest.raises(TypeError, match=msg):
IntervalIndex.from_breaks(data)
def test_length_one(self):
"""breaks of length one produce an empty IntervalIndex"""
breaks = [0]
result = IntervalIndex.from_breaks(breaks)
expected = IntervalIndex.from_breaks([])
tm.assert_index_equal(result, expected)
def test_left_right_dont_share_data(self):
# GH#36310
breaks = np.arange(5)
result = IntervalIndex.from_breaks(breaks)._data
assert result._left.base is None or result._left.base is not result._right.base
class TestFromTuples(ConstructorTests):
"""Tests specific to IntervalIndex.from_tuples"""
@pytest.fixture
def constructor(self):
return IntervalIndex.from_tuples
def get_kwargs_from_breaks(self, breaks, closed="right"):
"""
converts intervals in breaks format to a dictionary of kwargs specific
to the format expected by IntervalIndex.from_tuples
"""
if len(breaks) == 0:
return {"data": breaks}
tuples = list(zip(breaks[:-1], breaks[1:]))
if isinstance(breaks, (list, tuple)):
return {"data": tuples}
elif is_categorical_dtype(breaks):
return {"data": breaks._constructor(tuples)}
return {"data": com.asarray_tuplesafe(tuples)}
def test_constructor_errors(self):
# non-tuple
tuples = [(0, 1), 2, (3, 4)]
msg = "IntervalIndex.from_tuples received an invalid item, 2"
with pytest.raises(TypeError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
# too few/many items
tuples = [(0, 1), (2,), (3, 4)]
msg = "IntervalIndex.from_tuples requires tuples of length 2, got {t}"
with pytest.raises(ValueError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
tuples = [(0, 1), (2, 3, 4), (5, 6)]
with pytest.raises(ValueError, match=msg.format(t=tuples)):
IntervalIndex.from_tuples(tuples)
def test_na_tuples(self):
# tuple (NA, NA) evaluates the same as NA as an element
na_tuple = [(0, 1), (np.nan, np.nan), (2, 3)]
idx_na_tuple = IntervalIndex.from_tuples(na_tuple)
idx_na_element = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)])
tm.assert_index_equal(idx_na_tuple, idx_na_element)
class TestClassConstructors(ConstructorTests):
"""Tests specific to the IntervalIndex/Index constructors"""
@pytest.fixture(
params=[IntervalIndex, partial(Index, dtype="interval")],
ids=["IntervalIndex", "Index"],
)
def constructor(self, request):
return request.param
def get_kwargs_from_breaks(self, breaks, closed="right"):
"""
converts intervals in breaks format to a dictionary of kwargs specific
to the format expected by the IntervalIndex/Index constructors
"""
if len(breaks) == 0:
return {"data": breaks}
ivs = [
Interval(left, right, closed) if notna(left) else left
for left, right in zip(breaks[:-1], breaks[1:])
]
if isinstance(breaks, list):
return {"data": ivs}
elif is_categorical_dtype(breaks):
return {"data": breaks._constructor(ivs)}
return {"data": np.array(ivs, dtype=object)}
def test_generic_errors(self, constructor):
"""
override the base class implementation since errors are handled
differently; checks unnecessary since caught at the Interval level
"""
pass
def test_constructor_string(self):
# GH23013
# When forming the interval from breaks,
# the interval of strings is already forbidden.
pass
def test_constructor_errors(self, constructor):
# mismatched closed within intervals with no constructor override
ivs = [Interval(0, 1, closed="right"), Interval(2, 3, closed="left")]
msg = "intervals must all be closed on the same side"
with pytest.raises(ValueError, match=msg):
constructor(ivs)
# scalar
msg = (
r"IntervalIndex\(...\) must be called with a collection of "
"some kind, 5 was passed"
)
with pytest.raises(TypeError, match=msg):
constructor(5)
# not an interval
msg = "type <class 'numpy.int64'> with value 0 is not an interval"
with pytest.raises(TypeError, match=msg):
constructor([0, 1])
@pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning")
@pytest.mark.parametrize(
"data, closed",
[
([], "both"),
([np.nan, np.nan], "neither"),
(
[Interval(0, 3, closed="neither"), Interval(2, 5, closed="neither")],
"left",
),
(
[Interval(0, 3, closed="left"), Interval(2, 5, closed="right")],
"neither",
),
(IntervalIndex.from_breaks(range(5), closed="both"), "right"),
],
)
def test_override_inferred_closed(self, constructor, data, closed):
# GH 19370
if isinstance(data, IntervalIndex):
tuples = data.to_tuples()
else:
tuples = [(iv.left, iv.right) if notna(iv) else iv for iv in data]
expected = IntervalIndex.from_tuples(tuples, closed=closed)
result = constructor(data, closed=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"values_constructor", [list, np.array, IntervalIndex, IntervalArray]
)
def test_index_object_dtype(self, values_constructor):
# Index(intervals, dtype=object) is an Index (not an IntervalIndex)
intervals = [Interval(0, 1), Interval(1, 2), Interval(2, 3)]
values = values_constructor(intervals)
result = Index(values, dtype=object)
assert type(result) is Index
tm.assert_numpy_array_equal(result.values, np.array(values))
def test_index_mixed_closed(self):
# GH27172
intervals = [
Interval(0, 1, closed="left"),
Interval(1, 2, closed="right"),
Interval(2, 3, closed="neither"),
Interval(3, 4, closed="both"),
]
result = Index(intervals)
expected = Index(intervals, dtype=object)
tm.assert_index_equal(result, expected)
def test_dtype_closed_mismatch():
# GH#38394 closed specified in both dtype and IntervalIndex constructor
dtype = IntervalDtype(np.int64, "left")
msg = "closed keyword does not match dtype.closed"
with pytest.raises(ValueError, match=msg):
IntervalIndex([], dtype=dtype, closed="neither")
with pytest.raises(ValueError, match=msg):
IntervalArray([], dtype=dtype, closed="neither")
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/gaussian_process/gpc.py | 42 | 31571 | """Gaussian processes classification."""
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve
from scipy.optimize import fmin_l_bfgs_b
from scipy.special import erf
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.gaussian_process.kernels \
import RBF, CompoundKernel, ConstantKernel as C
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,
128.12323805, -2010.49422654])[:, np.newaxis]
class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be minimized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to draw random initial values of the kernel
hyperparameters when ``n_restarts_optimizer > 0``. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_: array-like, shape = (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like, shape = (n_classes,)
Unique class labels.
kernel_: kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_: array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_: array-like, shape = (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_: array-like, shape = (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError("%s supports only binary classification. "
"y contains classes %s"
% (self.__class__.__name__, self.classes_))
elif self.classes_.size == 1:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds)]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0],
bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = \
self._posterior_mode(K, return_temporaries=True)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# As discussed on Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
# Line 7:
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = np.sqrt(np.pi / alpha) \
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = \
self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C))))
s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \
* (pi * (1 - pi) * (1 - 2 * pi)) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start are enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if self.warm_start and hasattr(self, "f_cached") \
and self.f_cached.shape == self.y_train_.shape:
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = 1 / (1 + np.exp(-f))
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = -0.5 * a.T.dot(f) \
- np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
- np.log(np.diag(L)).sum()
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
class GaussianProcessClassifier(BaseEstimator, ClassifierMixin):
"""Gaussian process classification (GPC) based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
Gaussian Processes for Machine Learning (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function. For multi-class classification, several binary one-versus rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be minimized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
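For illustration only, a custom optimizer built on
``scipy.optimize.minimize`` could be passed as a callable along the
lines of the following sketch (a hypothetical helper, not part of
scikit-learn; it relies on ``obj_func`` returning the negative
log-marginal likelihood and its gradient, as the implementation does
internally)::
def custom_optimizer(obj_func, initial_theta, bounds):
    from scipy.optimize import minimize
    # obj_func(theta) returns (-lml, -gradient) by default, so jac=True
    res = minimize(obj_func, initial_theta, bounds=bounds,
                   jac=True, method="L-BFGS-B")
    return res.x, res.fun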
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to draw random initial values of the kernel
hyperparameters when ``n_restarts_optimizer > 0``. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
multi_class: string, default: "one_vs_rest"
Specifies how multi-class classification problems are handled.
Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest",
one binary Gaussian process classifier is fitted for each class, which
is trained to separate this class from the rest. In "one_vs_one", one
binary Gaussian process classifier is fitted for each pair of classes,
which is trained to separate these two classes. The predictions of
these binary predictors are combined into multi-class predictions.
Note that "one_vs_one" does not support predicting probability
estimates.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
kernel_ : kernel object
The kernel used for prediction. In case of binary classification,
the structure of the kernel is the same as the one passed as parameter
but with optimized hyperparameters. In case of multi-class
classification, a CompoundKernel is returned which consists of the
different kernels used in the one-versus-rest classifiers.
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
classes_ : array-like, shape = (n_classes,)
Unique class labels.
n_classes_ : int
The number of classes in the training data
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None,
multi_class="one_vs_rest", n_jobs=1):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
self.multi_class = multi_class
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=False)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
self.kernel, self.optimizer, self.n_restarts_optimizer,
self.max_iter_predict, self.warm_start, self.copy_X_train,
self.random_state)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError("GaussianProcessClassifier requires 2 or more "
"distinct classes. Only class %s present."
% self.classes_[0])
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = \
OneVsRestClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = \
OneVsOneClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
else:
raise ValueError("Unknown multi-class mode %s"
% self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_])
else:
self.log_marginal_likelihood_value_ = \
self.base_estimator_.log_marginal_likelihood()
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["classes_", "n_classes_"])
X = check_array(X)
return self.base_estimator_.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError("one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead.")
X = check_array(X)
return self.base_estimator_.predict_proba(X)
@property
def kernel_(self):
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_
for estimator in self.base_estimator_.estimators_])
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers are returned.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or none
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
kernel. In the latter case, all individual kernel get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC.")
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[estimator.log_marginal_likelihood(theta)
for i, estimator in enumerate(estimators)])
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[estimator.log_marginal_likelihood(
theta[n_dims * i:n_dims * (i + 1)])
for i, estimator in enumerate(estimators)])
else:
raise ValueError("Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0],
theta.shape[0]))
| bsd-3-clause |
justincassidy/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# univariate feature scores; the tests below check that the best-scoring
# features are recovered
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
spallavolu/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
fredhusser/scikit-learn | examples/applications/face_recognition.py | 191 | 5513 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the most represented people in the dataset::

                     precision    recall  f1-score   support

        Ariel Sharon       0.67      0.92      0.77        13
        Colin Powell       0.75      0.78      0.76        60
     Donald Rumsfeld       0.78      0.67      0.72        27
       George W Bush       0.86      0.86      0.86       146
   Gerhard Schroeder       0.76      0.76      0.76        25
         Hugo Chavez       0.67      0.67      0.67        15
          Tony Blair       0.81      0.69      0.75        36

         avg / total       0.80      0.80      0.80       322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the flattened image data directly (relative
# pixel position information is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significant eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
DataViva/dataviva-scripts | scripts/rais/_rdo.py | 1 | 2988 | import sys, os
import pandas as pd
import numpy as np
file_path = os.path.dirname(os.path.realpath(__file__))
ps_calcs_lib_path = os.path.abspath(os.path.join(file_path, "../../", "lib/ps_calcs"))
sys.path.insert(0, ps_calcs_lib_path)
import ps_calcs
def rdo(ybi, yi, year, depths):
rca_dist_opp = []
for geo_level in depths["bra"]:
print "geo level:", geo_level
ybi_data = ybi.reset_index()
bra_criterion = ybi_data["bra_id"].str.len() == geo_level
cnae_criterion = ybi_data["cnae_id"].str.len() == 6
ybi_data = ybi_data[bra_criterion & cnae_criterion]
# ybi_data = ybi_data.reindex(index=ybi_index)
# ybi_data = ybi_data.drop(["year", "num_emp", "num_est", "wage_avg", "num_emp_est"], axis=1)
ybi_data = ybi_data[["bra_id", "cnae_id", "wage"]]
# ybi_data = ybi_data.unstack()
# levels = ybi_data.columns.levels
# labels = ybi_data.columns.labels
# ybi_data.columns = levels[1][labels[1]]
'''
RCAS
'''
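        # Explanatory note (added): ps_calcs.rca is assumed to compute the
        # Balassa revealed-comparative-advantage index,
        #   RCA(b, c) = (wage[b, c] / wage[b, :].sum()) / (wage[:, c].sum() / wage.sum()),
        # so RCA >= 1 (binarized below) flags bra/cnae pairs in which the
        # region's wage share exceeds the national share for that industry.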
# ybi_data = ybi_data.pivot(index="bra_id", columns="cnae_id", values="wage").fillna(0)
ybi_data = ybi_data.pivot(index="bra_id", columns="cnae_id", values="wage")
rcas = ps_calcs.rca(ybi_data)
rcas_binary = rcas.copy()
rcas_binary[rcas_binary >= 1] = 1
rcas_binary[rcas_binary < 1] = 0
'''
DISTANCES
'''
'''calculate proximity for opportunity gain calculation'''
prox = ps_calcs.proximity(rcas_binary)
'''calculate distances using proximity'''
dist = ps_calcs.distance(rcas_binary, prox).fillna(0)
'''
OPP GAIN
'''
'''calculate product complexity'''
pci = ps_calcs.complexity(rcas_binary)[1]
'''calculate opportunity gain'''
opp_gain = ps_calcs.opportunity_gain(rcas_binary, prox, pci)
rdo = []
for bra in rcas.index:
for cnae in rcas.columns:
rdo.append([year, bra, cnae, rcas[cnae][bra], dist[cnae][bra], opp_gain[cnae][bra]])
rca_dist_opp += rdo
# now time to merge!
print "merging datasets..."
ybi_rdo = pd.DataFrame(rca_dist_opp, columns=["year", "bra_id", "cnae_id", "rca", "distance", "opp_gain"])
ybi_rdo["year"] = ybi_rdo["year"].astype(int)
ybi_rdo["rca"][ybi_rdo["rca"] == 0] = np.nan
ybi_rdo = ybi_rdo.set_index(["year", "bra_id", "cnae_id"])
# get union of both sets of indexes
all_ybi_indexes = set(ybi.index).union(set(ybi_rdo.index))
all_ybi_indexes = pd.MultiIndex.from_tuples(all_ybi_indexes, names=["year", "bra_id", "cnae_id"])
# ybi = ybi.reindex(index=all_ybi_indexes, fill_value=0)
ybi = ybi.reindex(index=all_ybi_indexes)
ybi["rca"] = ybi_rdo["rca"]
ybi["distance"] = ybi_rdo["distance"]
ybi["opp_gain"] = ybi_rdo["opp_gain"]
return ybi | mit |
chrisdembia/perimysium | perimysium/stoplot.py | 2 | 4019 | #!/usr/bin/env python
"""
http://wiki.scipy.org/Cookbook/EmbeddingInTraitsGUI
http://code.enthought.com/projects/traits/docs/html/_static/mpl_figure_editor.py
https://github.com/enthought/traitsui/blob/master/examples/demo/Standard_Editors/CheckListEditor_simple_demo.py
http://stackoverflow.com/questions/16663908/enthought-traits-ui-add-values-dynamically-to-values-trait-of-checklisteditor
http://stackoverflow.com/questions/23650049/traitsui-checklisteditor-changing-the-case-of-values
"""
import sys
from numpy import deg2rad, rad2deg
import wx
import matplotlib
# We want matplotlib to use a wxPython backend
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib import pyplot as plt
from traits.api import HasTraits, Any, Instance, List, Str, on_trait_change
from traitsui.api import View, HSplit, Item, Group, HGroup, VGroup, \
CheckListEditor, HFlow, SetEditor
from traitsui.wx.editor import Editor
from traitsui.wx.basic_editor_factory import BasicEditorFactory
from dataman import storage2numpy, TRCFile
class _MPLFigureEditor(Editor):
scrollable = True
canvas = Instance(FigureCanvas)
def init(self, parent):
self.control = self._create_canvas(parent)
self.set_tooltip()
def update_editor(self):
pass
def _create_canvas(self, parent):
""" Create the MPL canvas. """
# The panel lets us add additional controls.
panel = wx.Panel(parent, -1, style=wx.CLIP_CHILDREN)
sizer = wx.BoxSizer(wx.VERTICAL)
panel.SetSizer(sizer)
# matplotlib commands to create a canvas
self.canvas = FigureCanvas(panel, -1, self.value)
sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
toolbar = NavigationToolbar2Wx(self.canvas)
sizer.Add(toolbar, 0, wx.EXPAND)
self.value.canvas.SetMinSize((10,10))
return panel
class MPLFigureEditor(BasicEditorFactory):
klass = _MPLFigureEditor
class StoragePlotter(HasTraits):
figure = Instance(Figure, ())
avail_columns = List(Str)
columns = List(
editor=SetEditor(name='avail_columns',
format_func=lambda x: x,
),
)
view = View(
HSplit(
Item('figure', editor=MPLFigureEditor(),
show_label=False
),
Item('columns', style='custom',
show_label=False,
),
scrollable=True,
),
width=700,
height=400,
resizable=True,
)
def __init__(self, stofpath, *args):
HasTraits.__init__(self, trait_value=True)
if stofpath.endswith('.sto') or stofpath.endswith('.mot'):
if 'StaticOptimization' in stofpath:
self.data = storage2numpy(stofpath, excess_header_entries=2)
else:
self.data = storage2numpy(stofpath)
elif stofpath.endswith('.trc'):
self.data = TRCFile(stofpath).data
avail_columns = list(self.data.dtype.names)
avail_columns.remove('time')
self.avail_columns = avail_columns
self.axes = self.figure.add_subplot(111)
# TODO
#for arg in args:
# self.columns.append(arg)
# self._columns_changed()
@on_trait_change('columns')
def _columns_changed(self):
self.axes.cla()
for name in self.columns:
self.axes.plot(self.data['time'], self.data[name], label=name)
self.axes.set_xlabel('time (s)')
self.axes.legend(loc='best')
self.figure.canvas.draw()
def start_plotter(*args, **kwargs):
"""TODO"""
plotter = StoragePlotter(*args, **kwargs)
plotter.configure_traits()
if __name__ == '__main__':
start_plotter(*sys.argv[1:])
| bsd-3-clause |
ninthdayjt/gridsketch | util.py | 1 | 1512 | from PIL import Image, ImageFilter
import numpy as np
#import pandas as pd
import os
# def Merge(wsPath, uvPath, outputPath):
# base_img = Image.open(wsPath)
# tmp_img = Image.open(uvPath)
# r, g, b, a = tmp_img.split()
# base_img.paste(tmp_img, mask=a)
# base_img.save(outputPath, "PNG")
# os.remove(uvPath)
# os.remove(wsPath)
def cut(path, targetpath):
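    # Added description (inferred from the code below): ``path`` is read as a
    # mask image whose white pixels (255) are mapped to 1 and then multiplied
    # element-wise with the image at ``targetpath``, so only the white region
    # of the mask survives; the result overwrites ``targetpath`` as a PNG.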
pathdata = np.array(Image.open(path).filter(ImageFilter.SHARPEN))
targetdata = np.array(Image.open(targetpath))
pathdata[pathdata == 255] = 1
newdata = pathdata * targetdata
img = Image.fromarray(newdata)
img.save(targetpath, "PNG")
# def interp(data, lats, lons, res):
# df = pd.DataFrame(data, index=lats, columns=lons)
# interpLons = np.linspace(lons[0], lons[-1], num=res)
# interplats = np.linspace(lats[0], lats[-1], num=res)
# df_interpRow = pd.DataFrame(np.zeros([len(interplats), data.shape[1]]) + np.nan,
# columns=lons, index=interplats)
# df_interpCol = pd.DataFrame(np.zeros([data.shape[0], len(interpLons)]) + np.nan,
# index=lats, columns=interpLons)
# new = pd.concat([df, df_interpRow], axis=0).sort_index(
# axis=0, ascending=True)
# new = pd.concat([new, df_interpCol], axis=1).sort_index(
# axis=1, ascending=True)
# new = new.interpolate(method='cubic', axis=0).interpolate(
# method='cubic', axis=1)
# return(new.values, new.index, new.columns)
| apache-2.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/backends/backend_wx.py | 6 | 64967 | """
A wxPython backend for matplotlib, based (very heavily) on
backend_template.py and backend_gtk.py
Author: Jeremy O'Donoghue ([email protected])
Derived from original copyright work by John Hunter
([email protected])
Copyright (C) Jeremy O'Donoghue & John Hunter, 2003-4
License: This work is licensed under a PSF compatible license. A copy
should be included with this source code.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six.moves import xrange
import sys
import os
import os.path
import math
import weakref
import warnings
import numpy as np
import matplotlib
from matplotlib.backend_bases import (RendererBase, GraphicsContextBase,
FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
cursors, TimerBase)
from matplotlib.backend_bases import ShowBase
from matplotlib.backend_bases import _has_pil
from matplotlib._pylab_helpers import Gcf
from matplotlib.cbook import (is_string_like, is_writable_file_like,
warn_deprecated)
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
from matplotlib.widgets import SubplotTool
from matplotlib import rcParams
from . import wx_compat as wxc
import wx
# Debugging settings here...
# Debug level set here. If the debug level is less than 5, information
# messages (progressively more info for lower value) are printed. In addition,
# traceback is performed, and pdb activated, for all uncaught exceptions in
# this case
_DEBUG = 5
if _DEBUG < 5:
import traceback
import pdb
_DEBUG_lvls = {1: 'Low ', 2: 'Med ', 3: 'High', 4: 'Error'}
def DEBUG_MSG(string, lvl=3, o=None):
if lvl >= _DEBUG:
cls = o.__class__
# Jeremy, often times the commented line won't print but the
# one below does. I think WX is redefining stderr, damned
# beast
#print >>sys.stderr, "%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls)
print("%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls))
def debug_on_error(type, value, tb):
"""Code due to Thomas Heller - published in Python Cookbook (O'Reilley)"""
traceback.print_exc(type, value, tb)
print()
pdb.pm() # jdh uncomment
class fake_stderr(object):
"""
Wx does strange things with stderr, as it makes the assumption that
there is probably no console. This redirects stderr to the console, since
we know that there is one!
"""
def write(self, msg):
print("Stderr: %s\n\r" % msg)
#if _DEBUG < 5:
#sys.excepthook = debug_on_error
#WxLogger =wx.LogStderr()
#sys.stderr = fake_stderr
# the true dots per inch on the screen; should be display dependent
# see
# http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5
# for some info about screen dpi
PIXELS_PER_INCH = 75
# Delay time for idle checks
IDLE_DELAY = 5
def error_msg_wx(msg, parent=None):
"""
Signal an error condition -- in a GUI, popup a error dialog
"""
dialog = wx.MessageDialog(parent=parent,
message=msg,
caption='Matplotlib backend_wx error',
style=wx.OK | wx.CENTRE)
dialog.ShowModal()
dialog.Destroy()
return None
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
class TimerWx(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses WxTimer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def __init__(self, parent, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
# Create a new timer and connect the timer event to our handler.
# For WX, the events have to use a widget for binding.
self.parent = parent
self._timer = wx.Timer(self.parent, wx.NewId())
self.parent.Bind(wx.EVT_TIMER, self._on_timer, self._timer)
# Unbinding causes Wx to stop for some reason. Disabling for now.
# def __del__(self):
# TimerBase.__del__(self)
# self.parent.Bind(wx.EVT_TIMER, None, self._timer)
def _timer_start(self):
self._timer.Start(self._interval, self._single)
def _timer_stop(self):
self._timer.Stop()
def _timer_set_interval(self):
self._timer_start()
def _timer_set_single_shot(self):
self._timer.Start()
def _on_timer(self, *args):
TimerBase._on_timer(self)
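# --- Hedged usage sketch (added; not part of the original source): TimerWx
# instances are normally obtained from a canvas via ``new_timer`` (defined on
# FigureCanvasWx further down) and driven through the TimerBase API. The
# helper below is illustrative only and never called here; ``canvas`` is
# assumed to be an existing FigureCanvasWx.
def _timer_usage_sketch(canvas):
    def _tick():
        print("timer fired")
    timer = canvas.new_timer(interval=1000)   # returns a TimerWx bound to the canvas
    timer.add_callback(_tick)                 # TimerBase: register the callback
    timer.start()                             # starts the underlying wx.Timer
    return timer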
class RendererWx(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles. It acts as the
'renderer' instance used by many classes in the hierarchy.
"""
# In wxPython, drawing is performed on a wxDC instance, which will
    # generally be mapped to the client area of the window displaying
# the plot. Under wxPython, the wxDC instance has a wx.Pen which
# describes the colour and weight of any lines drawn, and a wxBrush
# which describes the fill colour of any closed polygon.
fontweights = wxc.fontweights
fontangles = wxc.fontangles
# wxPython allows for portable font styles, choosing them appropriately
# for the target platform. Map some standard font names to the portable
# styles
    # QUESTION: Would it be wise to agree on standard fontnames across all backends?
fontnames = wxc.fontnames
def __init__(self, bitmap, dpi):
"""
Initialise a wxWindows renderer instance.
"""
warn_deprecated('2.0', message="The WX backend is "
"deprecated. It's untested "
"and will be removed in Matplotlib 2.2. "
"Use the WXAgg backend instead. "
"See Matplotlib usage FAQ for more info on backends.",
alternative='WXAgg')
RendererBase.__init__(self)
DEBUG_MSG("__init__()", 1, self)
self.width = bitmap.GetWidth()
self.height = bitmap.GetHeight()
self.bitmap = bitmap
self.fontd = {}
self.dpi = dpi
self.gc = None
def flipy(self):
return True
def offset_text_height(self):
return True
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontPropertry prop
"""
# return 1, 1
if ismath:
s = self.strip_math(s)
if self.gc is None:
gc = self.new_gc()
else:
gc = self.gc
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
gfx_ctx.SetFont(font, wx.BLACK)
w, h, descent, leading = gfx_ctx.GetFullTextExtent(s)
return w, h, descent
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def handle_clip_rectangle(self, gc):
new_bounds = gc.get_clip_rectangle()
if new_bounds is not None:
new_bounds = new_bounds.bounds
gfx_ctx = gc.gfx_ctx
if gfx_ctx._lastcliprect != new_bounds:
gfx_ctx._lastcliprect = new_bounds
if new_bounds is None:
gfx_ctx.ResetClip()
else:
gfx_ctx.Clip(new_bounds[0],
self.height - new_bounds[1] - new_bounds[3],
new_bounds[2], new_bounds[3])
@staticmethod
def convert_path(gfx_ctx, path, transform):
wxpath = gfx_ctx.CreatePath()
for points, code in path.iter_segments(transform):
if code == Path.MOVETO:
wxpath.MoveToPoint(*points)
elif code == Path.LINETO:
wxpath.AddLineToPoint(*points)
elif code == Path.CURVE3:
wxpath.AddQuadCurveToPoint(*points)
elif code == Path.CURVE4:
wxpath.AddCurveToPoint(*points)
elif code == Path.CLOSEPOLY:
wxpath.CloseSubpath()
return wxpath
def draw_path(self, gc, path, transform, rgbFace=None):
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
transform = transform + \
Affine2D().scale(1.0, -1.0).translate(0.0, self.height)
wxpath = self.convert_path(gfx_ctx, path, transform)
if rgbFace is not None:
gfx_ctx.SetBrush(wx.Brush(gc.get_wxcolour(rgbFace)))
gfx_ctx.DrawPath(wxpath)
else:
gfx_ctx.StrokePath(wxpath)
gc.unselect()
def draw_image(self, gc, x, y, im):
bbox = gc.get_clip_rectangle()
if bbox is not None:
l, b, w, h = bbox.bounds
else:
l = 0
b = 0
w = self.width
h = self.height
rows, cols = im.shape[:2]
bitmap = wxc.BitmapFromBuffer(cols, rows, im.tostring())
gc = self.get_gc()
gc.select()
gc.gfx_ctx.DrawBitmap(bitmap, int(l), int(self.height - b),
int(w), int(-h))
gc.unselect()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
if ismath:
s = self.strip_math(s)
DEBUG_MSG("draw_text()", 1, self)
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
color = gc.get_wxcolour(gc.get_rgb())
gfx_ctx.SetFont(font, color)
w, h, d = self.get_text_width_height_descent(s, prop, ismath)
x = int(x)
y = int(y - h)
if angle == 0.0:
gfx_ctx.DrawText(s, x, y)
else:
rads = angle / 180.0 * math.pi
xo = h * math.sin(rads)
yo = h * math.cos(rads)
gfx_ctx.DrawRotatedText(s, x - xo, y - yo, rads)
gc.unselect()
def new_gc(self):
"""
        Return an instance of a GraphicsContextWx, and set it as the current gc copy
"""
DEBUG_MSG('new_gc()', 2, self)
self.gc = GraphicsContextWx(self.bitmap, self)
self.gc.select()
self.gc.unselect()
return self.gc
def get_gc(self):
"""
Fetch the locally cached gc.
"""
# This is a dirty hack to allow anything with access to a renderer to
# access the current graphics context
assert self.gc is not None, "gc must be defined"
return self.gc
def get_wx_font(self, s, prop):
"""
Return a wx font. Cache instances in a font dictionary for
efficiency
"""
DEBUG_MSG("get_wx_font()", 1, self)
key = hash(prop)
fontprop = prop
fontname = fontprop.get_name()
font = self.fontd.get(key)
if font is not None:
return font
# Allow use of platform independent and dependent font names
wxFontname = self.fontnames.get(fontname, wx.ROMAN)
wxFacename = '' # Empty => wxPython chooses based on wx_fontname
# Font colour is determined by the active wx.Pen
# TODO: It may be wise to cache font information
size = self.points_to_pixels(fontprop.get_size_in_points())
font = wx.Font(int(size + 0.5), # Size
wxFontname, # 'Generic' name
self.fontangles[fontprop.get_style()], # Angle
self.fontweights[fontprop.get_weight()], # Weight
False, # Underline
wxFacename) # Platform font name
# cache the font and gc and return it
self.fontd[key] = font
return font
def points_to_pixels(self, points):
"""
        convert point measures to pixels using dpi and the pixels per
inch of the display
"""
return points * (PIXELS_PER_INCH / 72.0 * self.dpi / 72.0)
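        # Worked example (added comment): with PIXELS_PER_INCH = 75, a
        # 12-point size at self.dpi == 72 maps to 12 * (75/72 * 72/72) = 12.5
        # pixels.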
class GraphicsContextWx(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc...
This class stores a reference to a wxMemoryDC, and a
wxGraphicsContext that draws to it. Creating a wxGraphicsContext
seems to be fairly heavy, so these objects are cached based on the
bitmap object that is passed in.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, e.g., (0.5, 0.0, 1.0). wxPython uses an int interval, but
since wxPython colour management is rather simple, I have not chosen
to implement a separate colour manager class.
"""
_capd = {'butt': wx.CAP_BUTT,
'projecting': wx.CAP_PROJECTING,
'round': wx.CAP_ROUND}
_joind = {'bevel': wx.JOIN_BEVEL,
'miter': wx.JOIN_MITER,
'round': wx.JOIN_ROUND}
_dashd_wx = wxc.dashd_wx
_cache = weakref.WeakKeyDictionary()
def __init__(self, bitmap, renderer):
GraphicsContextBase.__init__(self)
#assert self.Ok(), "wxMemoryDC not OK to use"
DEBUG_MSG("__init__()", 1, self)
DEBUG_MSG("__init__() 2: %s" % bitmap, 1, self)
dc, gfx_ctx = self._cache.get(bitmap, (None, None))
if dc is None:
dc = wx.MemoryDC()
dc.SelectObject(bitmap)
gfx_ctx = wx.GraphicsContext.Create(dc)
gfx_ctx._lastcliprect = None
self._cache[bitmap] = dc, gfx_ctx
self.bitmap = bitmap
self.dc = dc
self.gfx_ctx = gfx_ctx
self._pen = wx.Pen('BLACK', 1, wx.SOLID)
gfx_ctx.SetPen(self._pen)
self._style = wx.SOLID
self.renderer = renderer
def select(self):
"""
Select the current bitmap into this wxDC instance
"""
if sys.platform == 'win32':
self.dc.SelectObject(self.bitmap)
self.IsSelected = True
def unselect(self):
"""
        Select a null bitmap into this wxDC instance
"""
if sys.platform == 'win32':
self.dc.SelectObject(wx.NullBitmap)
self.IsSelected = False
def set_foreground(self, fg, isRGBA=None):
"""
Set the foreground color. fg can be a matlab format string, a
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
"""
# Implementation note: wxPython has a separate concept of pen and
# brush - the brush fills any outline trace left by the pen.
# Here we set both to the same colour - if a figure is not to be
# filled, the renderer will set the brush to be transparent
# Same goes for text foreground...
DEBUG_MSG("set_foreground()", 1, self)
self.select()
GraphicsContextBase.set_foreground(self, fg, isRGBA)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_graylevel(self, frac):
"""
Set the foreground color. fg can be a matlab format string, a
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
"""
DEBUG_MSG("set_graylevel()", 1, self)
self.select()
GraphicsContextBase.set_graylevel(self, frac)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linewidth(self, w):
"""
Set the line width.
"""
w = float(w)
DEBUG_MSG("set_linewidth()", 1, self)
self.select()
if w > 0 and w < 1:
w = 1
GraphicsContextBase.set_linewidth(self, w)
lw = int(self.renderer.points_to_pixels(self._linewidth))
if lw == 0:
lw = 1
self._pen.SetWidth(lw)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
DEBUG_MSG("set_capstyle()", 1, self)
self.select()
GraphicsContextBase.set_capstyle(self, cs)
self._pen.SetCap(GraphicsContextWx._capd[self._capstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
DEBUG_MSG("set_joinstyle()", 1, self)
self.select()
GraphicsContextBase.set_joinstyle(self, js)
self._pen.SetJoin(GraphicsContextWx._joind[self._joinstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linestyle(self, ls):
"""
Set the line style to be one of
"""
DEBUG_MSG("set_linestyle()", 1, self)
self.select()
GraphicsContextBase.set_linestyle(self, ls)
try:
self._style = GraphicsContextWx._dashd_wx[ls]
except KeyError:
self._style = wx.LONG_DASH # Style not used elsewhere...
# On MS Windows platform, only line width of 1 allowed for dash lines
if wx.Platform == '__WXMSW__':
self.set_linewidth(1)
self._pen.SetStyle(self._style)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def get_wxcolour(self, color):
"""return a wx.Colour from RGB format"""
DEBUG_MSG("get_wx_color()", 1, self)
if len(color) == 3:
r, g, b = color
r *= 255
g *= 255
b *= 255
return wx.Colour(red=int(r), green=int(g), blue=int(b))
else:
r, g, b, a = color
r *= 255
g *= 255
b *= 255
a *= 255
return wx.Colour(
red=int(r),
green=int(g),
blue=int(b),
alpha=int(a))
class FigureCanvasWx(FigureCanvasBase, wx.Panel):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually) lives
inside a frame instantiated by a FigureManagerWx. The parent window
probably implements a wx.Sizer to control the displayed control size - but
we give a hint as to our preferred minimum size.
"""
keyvald = {
wx.WXK_CONTROL: 'control',
wx.WXK_SHIFT: 'shift',
wx.WXK_ALT: 'alt',
wx.WXK_LEFT: 'left',
wx.WXK_UP: 'up',
wx.WXK_RIGHT: 'right',
wx.WXK_DOWN: 'down',
wx.WXK_ESCAPE: 'escape',
wx.WXK_F1: 'f1',
wx.WXK_F2: 'f2',
wx.WXK_F3: 'f3',
wx.WXK_F4: 'f4',
wx.WXK_F5: 'f5',
wx.WXK_F6: 'f6',
wx.WXK_F7: 'f7',
wx.WXK_F8: 'f8',
wx.WXK_F9: 'f9',
wx.WXK_F10: 'f10',
wx.WXK_F11: 'f11',
wx.WXK_F12: 'f12',
wx.WXK_SCROLL: 'scroll_lock',
wx.WXK_PAUSE: 'break',
wx.WXK_BACK: 'backspace',
wx.WXK_RETURN: 'enter',
wx.WXK_INSERT: 'insert',
wx.WXK_DELETE: 'delete',
wx.WXK_HOME: 'home',
wx.WXK_END: 'end',
wx.WXK_PAGEUP: 'pageup',
wx.WXK_PAGEDOWN: 'pagedown',
wx.WXK_NUMPAD0: '0',
wx.WXK_NUMPAD1: '1',
wx.WXK_NUMPAD2: '2',
wx.WXK_NUMPAD3: '3',
wx.WXK_NUMPAD4: '4',
wx.WXK_NUMPAD5: '5',
wx.WXK_NUMPAD6: '6',
wx.WXK_NUMPAD7: '7',
wx.WXK_NUMPAD8: '8',
wx.WXK_NUMPAD9: '9',
wx.WXK_NUMPAD_ADD: '+',
wx.WXK_NUMPAD_SUBTRACT: '-',
wx.WXK_NUMPAD_MULTIPLY: '*',
wx.WXK_NUMPAD_DIVIDE: '/',
wx.WXK_NUMPAD_DECIMAL: 'dec',
wx.WXK_NUMPAD_ENTER: 'enter',
wx.WXK_NUMPAD_UP: 'up',
wx.WXK_NUMPAD_RIGHT: 'right',
wx.WXK_NUMPAD_DOWN: 'down',
wx.WXK_NUMPAD_LEFT: 'left',
wx.WXK_NUMPAD_PAGEUP: 'pageup',
wx.WXK_NUMPAD_PAGEDOWN: 'pagedown',
wx.WXK_NUMPAD_HOME: 'home',
wx.WXK_NUMPAD_END: 'end',
wx.WXK_NUMPAD_INSERT: 'insert',
wx.WXK_NUMPAD_DELETE: 'delete',
}
def __init__(self, parent, id, figure):
"""
Initialise a FigureWx instance.
- Initialise the FigureCanvasBase and wxPanel parents.
- Set event handlers for:
EVT_SIZE (Resize event)
EVT_PAINT (Paint event)
"""
FigureCanvasBase.__init__(self, figure)
# Set preferred window size hint - helps the sizer (if one is
# connected)
l, b, w, h = figure.bbox.bounds
w = int(math.ceil(w))
h = int(math.ceil(h))
wx.Panel.__init__(self, parent, id, size=wx.Size(w, h))
def do_nothing(*args, **kwargs):
warnings.warn(
"could not find a setinitialsize function for backend_wx; "
"please report your wxpython version=%s "
"to the matplotlib developers list" %
wxc.backend_version)
pass
# try to find the set size func across wx versions
try:
getattr(self, 'SetInitialSize')
except AttributeError:
self.SetInitialSize = getattr(self, 'SetBestFittingSize',
do_nothing)
if not hasattr(self, 'IsShownOnScreen'):
self.IsShownOnScreen = getattr(self, 'IsVisible',
lambda *args: True)
# Create the drawing bitmap
self.bitmap = wxc.EmptyBitmap(w, h)
DEBUG_MSG("__init__() - bitmap w:%d h:%d" % (w, h), 2, self)
# TODO: Add support for 'point' inspection and plot navigation.
self._isDrawn = False
self.Bind(wx.EVT_SIZE, self._onSize)
self.Bind(wx.EVT_PAINT, self._onPaint)
self.Bind(wx.EVT_KEY_DOWN, self._onKeyDown)
self.Bind(wx.EVT_KEY_UP, self._onKeyUp)
self.Bind(wx.EVT_RIGHT_DOWN, self._onRightButtonDown)
self.Bind(wx.EVT_RIGHT_DCLICK, self._onRightButtonDClick)
self.Bind(wx.EVT_RIGHT_UP, self._onRightButtonUp)
self.Bind(wx.EVT_MOUSEWHEEL, self._onMouseWheel)
self.Bind(wx.EVT_LEFT_DOWN, self._onLeftButtonDown)
self.Bind(wx.EVT_LEFT_DCLICK, self._onLeftButtonDClick)
self.Bind(wx.EVT_LEFT_UP, self._onLeftButtonUp)
self.Bind(wx.EVT_MOTION, self._onMotion)
self.Bind(wx.EVT_LEAVE_WINDOW, self._onLeave)
self.Bind(wx.EVT_ENTER_WINDOW, self._onEnter)
self.Bind(wx.EVT_IDLE, self._onIdle)
# Add middle button events
self.Bind(wx.EVT_MIDDLE_DOWN, self._onMiddleButtonDown)
self.Bind(wx.EVT_MIDDLE_DCLICK, self._onMiddleButtonDClick)
self.Bind(wx.EVT_MIDDLE_UP, self._onMiddleButtonUp)
if wx.VERSION_STRING < "2.9":
# only needed in 2.8 to reduce flicker
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.Bind(wx.EVT_ERASE_BACKGROUND, self._onEraseBackground)
else:
# this does the same in 2.9+
self.SetBackgroundStyle(wx.BG_STYLE_PAINT)
self.macros = {} # dict from wx id to seq of macros
def Destroy(self, *args, **kwargs):
wx.Panel.Destroy(self, *args, **kwargs)
def Copy_to_Clipboard(self, event=None):
"copy bitmap of canvas to system clipboard"
bmp_obj = wx.BitmapDataObject()
bmp_obj.SetBitmap(self.bitmap)
if not wx.TheClipboard.IsOpened():
open_success = wx.TheClipboard.Open()
if open_success:
wx.TheClipboard.SetData(bmp_obj)
wx.TheClipboard.Close()
wx.TheClipboard.Flush()
def draw_idle(self):
"""
Delay rendering until the GUI is idle.
"""
DEBUG_MSG("draw_idle()", 1, self)
self._isDrawn = False # Force redraw
# Triggering a paint event is all that is needed to defer drawing
# until later. The platform will send the event when it thinks it is
# a good time (usually as soon as there are no other events pending).
self.Refresh(eraseBackground=False)
def draw(self, drawDC=None):
"""
Render the figure using RendererWx instance renderer, or using a
previously defined renderer if none is specified.
"""
DEBUG_MSG("draw()", 1, self)
self.renderer = RendererWx(self.bitmap, self.figure.dpi)
self.figure.draw(self.renderer)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting periodic
events through the backend's native event loop. Implemented only
for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerWx(self, *args, **kwargs)
def flush_events(self):
wx.Yield()
def start_event_loop(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
Raises RuntimeError if event loop is already running.
"""
if hasattr(self, '_event_loop'):
raise RuntimeError("Event loop already running")
id = wx.NewId()
timer = wx.Timer(self, id=id)
if timeout > 0:
timer.Start(timeout * 1000, oneShot=True)
self.Bind(wx.EVT_TIMER, self.stop_event_loop, id=id)
# Event loop handler for start/stop event loop
self._event_loop = wxc.EventLoop()
self._event_loop.Run()
timer.Stop()
def stop_event_loop(self, event=None):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
"""
if hasattr(self, '_event_loop'):
if self._event_loop.IsRunning():
self._event_loop.Exit()
del self._event_loop
def _get_imagesave_wildcards(self):
'return the wildcard string for the filesave dialog'
default_filetype = self.get_default_filetype()
filetypes = self.get_supported_filetypes_grouped()
sorted_filetypes = sorted(filetypes.items())
wildcards = []
extensions = []
filter_index = 0
for i, (name, exts) in enumerate(sorted_filetypes):
ext_list = ';'.join(['*.%s' % ext for ext in exts])
extensions.append(exts[0])
wildcard = '%s (%s)|%s' % (name, ext_list, ext_list)
if default_filetype in exts:
filter_index = i
wildcards.append(wildcard)
wildcards = '|'.join(wildcards)
return wildcards, extensions, filter_index
def gui_repaint(self, drawDC=None, origin='WX'):
"""
Performs update of the displayed image on the GUI canvas, using the
supplied wx.PaintDC device context.
The 'WXAgg' backend sets origin accordingly.
"""
DEBUG_MSG("gui_repaint()", 1, self)
if self.IsShownOnScreen():
if not drawDC:
# not called from OnPaint use a ClientDC
drawDC = wx.ClientDC(self)
# following is for 'WX' backend on Windows
# the bitmap can not be in use by another DC,
# see GraphicsContextWx._cache
if wx.Platform == '__WXMSW__' and origin == 'WX':
img = self.bitmap.ConvertToImage()
bmp = img.ConvertToBitmap()
drawDC.DrawBitmap(bmp, 0, 0)
else:
drawDC.DrawBitmap(self.bitmap, 0, 0)
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['bmp'] = 'Windows bitmap'
filetypes['jpeg'] = 'JPEG'
filetypes['jpg'] = 'JPEG'
filetypes['pcx'] = 'PCX'
filetypes['png'] = 'Portable Network Graphics'
filetypes['tif'] = 'Tagged Image Format File'
filetypes['tiff'] = 'Tagged Image Format File'
filetypes['xpm'] = 'X pixmap'
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasBase.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
# artist contains methods rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
def print_bmp(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_BMP, *args, **kwargs)
if not _has_pil:
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_JPEG,
*args, **kwargs)
print_jpg = print_jpeg
def print_pcx(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PCX, *args, **kwargs)
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PNG, *args, **kwargs)
if not _has_pil:
def print_tiff(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_TIF,
*args, **kwargs)
print_tif = print_tiff
def print_xpm(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_XPM, *args, **kwargs)
def _print_image(self, filename, filetype, *args, **kwargs):
origBitmap = self.bitmap
l, b, width, height = self.figure.bbox.bounds
width = int(math.ceil(width))
height = int(math.ceil(height))
self.bitmap = wxc.EmptyBitmap(width, height)
renderer = RendererWx(self.bitmap, self.figure.dpi)
gc = renderer.new_gc()
self.figure.draw(renderer)
# image is the object that we call SaveFile on.
image = self.bitmap
# set the JPEG quality appropriately. Unfortunately, it is only
# possible to set the quality on a wx.Image object. So if we
# are saving a JPEG, convert the wx.Bitmap to a wx.Image,
# and set the quality.
if filetype == wx.BITMAP_TYPE_JPEG:
jpeg_quality = kwargs.get('quality',
rcParams['savefig.jpeg_quality'])
image = self.bitmap.ConvertToImage()
image.SetOption(wx.IMAGE_OPTION_QUALITY, str(jpeg_quality))
# Now that we have rendered into the bitmap, save it
# to the appropriate file type and clean up
if is_string_like(filename):
if not image.SaveFile(filename, filetype):
DEBUG_MSG('print_figure() file save error', 4, self)
raise RuntimeError(
'Could not save figure to %s\n' %
(filename))
elif is_writable_file_like(filename):
if not isinstance(image, wx.Image):
image = image.ConvertToImage()
if not image.SaveStream(filename, filetype):
DEBUG_MSG('print_figure() file save error', 4, self)
raise RuntimeError(
'Could not save figure to %s\n' %
(filename))
# Restore everything to normal
self.bitmap = origBitmap
# Note: draw is required here since bits of state about the
# last renderer are strewn about the artist draw methods. Do
# not remove the draw without first verifying that these have
# been cleaned up. The artist contains() methods will fail
# otherwise.
if self._isDrawn:
self.draw()
self.Refresh()
def _onPaint(self, evt):
"""
Called when wxPaintEvt is generated
"""
DEBUG_MSG("_onPaint()", 1, self)
drawDC = wx.PaintDC(self)
if not self._isDrawn:
self.draw(drawDC=drawDC)
else:
self.gui_repaint(drawDC=drawDC)
evt.Skip()
def _onEraseBackground(self, evt):
"""
Called when window is redrawn; since we are blitting the entire
image, we can leave this blank to suppress flicker.
"""
pass
def _onSize(self, evt):
"""
Called when wxEventSize is generated.
In this application we attempt to resize to fit the window, so it
is better to take the performance hit and redraw the whole window.
"""
DEBUG_MSG("_onSize()", 2, self)
# Create a new, correctly sized bitmap
self._width, self._height = self.GetClientSize()
self.bitmap = wxc.EmptyBitmap(self._width, self._height)
self._isDrawn = False
if self._width <= 1 or self._height <= 1:
return # Empty figure
dpival = self.figure.dpi
winch = self._width / dpival
hinch = self._height / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
# Rendering will happen on the associated paint event
# so no need to do anything here except to make sure
# the whole background is repainted.
self.Refresh(eraseBackground=False)
FigureCanvasBase.resize_event(self)
def _get_key(self, evt):
keyval = evt.KeyCode
if keyval in self.keyvald:
key = self.keyvald[keyval]
elif keyval < 256:
key = chr(keyval)
# wx always returns an uppercase, so make it lowercase if the shift
# key is not depressed (NOTE: this will not handle Caps Lock)
if not evt.ShiftDown():
key = key.lower()
else:
key = None
for meth, prefix in (
[evt.AltDown, 'alt'],
[evt.ControlDown, 'ctrl'], ):
if meth():
key = '{0}+{1}'.format(prefix, key)
return key
def _onIdle(self, evt):
'a GUI idle event'
evt.Skip()
FigureCanvasBase.idle_event(self, guiEvent=evt)
def _onKeyDown(self, evt):
"""Capture key press."""
key = self._get_key(evt)
evt.Skip()
FigureCanvasBase.key_press_event(self, key, guiEvent=evt)
def _onKeyUp(self, evt):
"""Release key."""
key = self._get_key(evt)
# print 'release key', key
evt.Skip()
FigureCanvasBase.key_release_event(self, key, guiEvent=evt)
def _onRightButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 3, guiEvent=evt)
def _onRightButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 3,
dblclick=True, guiEvent=evt)
def _onRightButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
if self.HasCapture():
self.ReleaseMouse()
FigureCanvasBase.button_release_event(self, x, y, 3, guiEvent=evt)
def _onLeftButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 1, guiEvent=evt)
def _onLeftButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 1,
dblclick=True, guiEvent=evt)
def _onLeftButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# print 'release button', 1
evt.Skip()
if self.HasCapture():
self.ReleaseMouse()
FigureCanvasBase.button_release_event(self, x, y, 1, guiEvent=evt)
# Add middle button events
def _onMiddleButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 2, guiEvent=evt)
def _onMiddleButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 2,
dblclick=True, guiEvent=evt)
def _onMiddleButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# print 'release button', 1
evt.Skip()
if self.HasCapture():
self.ReleaseMouse()
FigureCanvasBase.button_release_event(self, x, y, 2, guiEvent=evt)
def _onMouseWheel(self, evt):
"""Translate mouse wheel events into matplotlib events"""
# Determine mouse location
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# Convert delta/rotation/rate into a floating point step size
delta = evt.GetWheelDelta()
rotation = evt.GetWheelRotation()
rate = evt.GetLinesPerAction()
# print "delta,rotation,rate",delta,rotation,rate
step = rate * float(rotation) / delta
# Done handling event
evt.Skip()
# Mac is giving two events for every wheel event
# Need to skip every second one
if wx.Platform == '__WXMAC__':
if not hasattr(self, '_skipwheelevent'):
self._skipwheelevent = True
elif self._skipwheelevent:
self._skipwheelevent = False
return # Return without processing event
else:
self._skipwheelevent = True
# Convert to mpl event
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=evt)
def _onMotion(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=evt)
def _onLeave(self, evt):
"""Mouse has left the window."""
evt.Skip()
FigureCanvasBase.leave_notify_event(self, guiEvent=evt)
def _onEnter(self, evt):
"""Mouse has entered the window."""
FigureCanvasBase.enter_notify_event(self, guiEvent=evt)
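# --- Hedged embedding sketch (added; not part of the original source): the
# class docstring above notes that the canvas usually lives inside a frame
# created by FigureManagerWx, but it can also be hosted in a plain wx.Frame.
# The helper below is illustrative only and never called here; the frame title
# and plot data are arbitrary.
def _embedding_sketch():
    app = wx.App(False)
    frame = wx.Frame(None, -1, "Embedding sketch")
    fig = Figure(figsize=(4, 3), dpi=100)
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2], [0, 1, 4])
    canvas = FigureCanvasWx(frame, -1, fig)   # same signature as __init__ above
    sizer = wx.BoxSizer(wx.VERTICAL)
    sizer.Add(canvas, 1, wx.EXPAND)
    frame.SetSizer(sizer)
    frame.Fit()
    frame.Show()
    app.MainLoop()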
########################################################################
#
# The following functions and classes are for pylab compatibility
# mode (matplotlib.pylab) and implement figure managers, etc...
#
########################################################################
def _create_wx_app():
"""
    Creates a wx.App instance if one has not been created so far.
"""
wxapp = wx.GetApp()
if wxapp is None:
wxapp = wx.App(False)
wxapp.SetExitOnFrameDelete(True)
# retain a reference to the app object so it does not get garbage
# collected and cause segmentation faults
_create_wx_app.theWxApp = wxapp
def draw_if_interactive():
"""
    This should be overridden in a windowing environment if drawing
should be done in interactive python mode
"""
DEBUG_MSG("draw_if_interactive()", 1, None)
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
needmain = not wx.App.IsMainLoopRunning()
if needmain:
wxapp = wx.GetApp()
if wxapp is not None:
wxapp.MainLoop()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
_create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, fig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
fig = figure
frame = FigureFrameWx(num, fig)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
figure.canvas.draw_idle()
return figmgr
class FigureFrameWx(wx.Frame):
def __init__(self, num, fig):
# On non-Windows platform, explicitly set the position - fix
# positioning bug on some Linux platforms
if wx.Platform == '__WXMSW__':
pos = wx.DefaultPosition
else:
pos = wx.Point(20, 20)
l, b, w, h = fig.bbox.bounds
wx.Frame.__init__(self, parent=None, id=-1, pos=pos,
title="Figure %d" % num)
# Frame will be sized later by the Fit method
DEBUG_MSG("__init__()", 1, self)
self.num = num
statbar = StatusBarWx(self)
self.SetStatusBar(statbar)
self.canvas = self.get_canvas(fig)
self.canvas.SetInitialSize(wx.Size(fig.bbox.width, fig.bbox.height))
self.canvas.SetFocus()
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version
self.toolbar = self._get_toolbar(statbar)
if self.toolbar is not None:
self.toolbar.Realize()
# On Windows platform, default window size is incorrect, so set
# toolbar width to figure width.
if wxc.is_phoenix:
tw, th = self.toolbar.GetSize()
fw, fh = self.canvas.GetSize()
else:
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
self.SetSizer(self.sizer)
self.Fit()
self.canvas.SetMinSize((2, 2))
# give the window a matplotlib icon rather than the stock one.
# This is not currently working on Linux and is untested elsewhere.
# icon_path = os.path.join(matplotlib.rcParams['datapath'],
# 'images', 'matplotlib.png')
#icon = wx.IconFromBitmap(wx.Bitmap(icon_path))
# for xpm type icons try:
#icon = wx.Icon(icon_path, wx.BITMAP_TYPE_XPM)
# self.SetIcon(icon)
self.figmgr = FigureManagerWx(self.canvas, num, self)
self.Bind(wx.EVT_CLOSE, self._onClose)
def _get_toolbar(self, statbar):
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2Wx(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
def get_canvas(self, fig):
return FigureCanvasWx(self, -1, fig)
def get_figure_manager(self):
DEBUG_MSG("get_figure_manager()", 1, self)
return self.figmgr
def _onClose(self, evt):
DEBUG_MSG("onClose()", 1, self)
self.canvas.close_event()
self.canvas.stop_event_loop()
Gcf.destroy(self.num)
# self.Destroy()
def GetToolBar(self):
"""Override wxFrame::GetToolBar as we don't have managed toolbar"""
return self.toolbar
def Destroy(self, *args, **kwargs):
try:
self.canvas.mpl_disconnect(self.toolbar._idDrag)
# Rationale for line above: see issue 2941338.
except AttributeError:
pass # classic toolbar lacks the attribute
if not self.IsBeingDeleted():
wx.Frame.Destroy(self, *args, **kwargs)
if self.toolbar is not None:
self.toolbar.Destroy()
wxapp = wx.GetApp()
if wxapp:
wxapp.Yield()
return True
class FigureManagerWx(FigureManagerBase):
"""
This class contains the FigureCanvas and GUI frame
It is instantiated by GcfWx whenever a new figure is created. GcfWx is
responsible for managing multiple instances of FigureManagerWx.
public attrs
canvas - a FigureCanvasWx(wx.Panel) instance
window - a wxFrame instance - wxpython.org/Phoenix/docs/html/Frame.html
"""
def __init__(self, canvas, num, frame):
DEBUG_MSG("__init__()", 1, self)
FigureManagerBase.__init__(self, canvas, num)
self.frame = frame
self.window = frame
self.tb = frame.GetToolBar()
self.toolbar = self.tb # consistent with other backends
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.tb is not None:
self.tb.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def show(self):
self.frame.Show()
self.canvas.draw()
def destroy(self, *args):
DEBUG_MSG("destroy()", 1, self)
self.frame.Destroy()
wxapp = wx.GetApp()
if wxapp:
wxapp.Yield()
def get_window_title(self):
return self.window.GetTitle()
def set_window_title(self, title):
self.window.SetTitle(title)
def resize(self, width, height):
'Set the canvas size in pixels'
self.canvas.SetInitialSize(wx.Size(width, height))
self.window.GetSizer().Fit(self.window)
# Identifiers for toolbar controls - images_wx contains bitmaps for the images
# used in the controls. wxWindows does not provide any stock images, so I've
# 'stolen' those from GTK2, and transformed them into the appropriate format.
#import images_wx
_NTB_AXISMENU = wx.NewId()
_NTB_AXISMENU_BUTTON = wx.NewId()
_NTB_X_PAN_LEFT = wx.NewId()
_NTB_X_PAN_RIGHT = wx.NewId()
_NTB_X_ZOOMIN = wx.NewId()
_NTB_X_ZOOMOUT = wx.NewId()
_NTB_Y_PAN_UP = wx.NewId()
_NTB_Y_PAN_DOWN = wx.NewId()
_NTB_Y_ZOOMIN = wx.NewId()
_NTB_Y_ZOOMOUT = wx.NewId()
#_NTB_SUBPLOT =wx.NewId()
_NTB_SAVE = wx.NewId()
_NTB_CLOSE = wx.NewId()
def _load_bitmap(filename):
"""
Load a bitmap file from the backends/images subdirectory in which the
matplotlib library is installed. The filename parameter should not
contain any path information as this is determined automatically.
Returns a wx.Bitmap object
"""
basedir = os.path.join(rcParams['datapath'], 'images')
bmpFilename = os.path.normpath(os.path.join(basedir, filename))
if not os.path.exists(bmpFilename):
raise IOError('Could not find bitmap file "%s"; dying' % bmpFilename)
bmp = wx.Bitmap(bmpFilename)
return bmp
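# Example (added comment, never executed): the toolbar classes below use this
# helper for their icons, e.g. ``_load_bitmap('home.png')``; 'home.png' is
# assumed to be one of the stock images shipped in matplotlib's data
# directory.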
class MenuButtonWx(wx.Button):
"""
wxPython does not permit a menu to be incorporated directly into a toolbar.
This class simulates the effect by associating a pop-up menu with a button
in the toolbar, and managing this as though it were a menu.
"""
def __init__(self, parent):
wx.Button.__init__(self, parent, _NTB_AXISMENU_BUTTON, "Axes: ",
style=wx.BU_EXACTFIT)
self._toolbar = parent
self._menu = wx.Menu()
self._axisId = []
# First two menu items never change...
self._allId = wx.NewId()
self._invertId = wx.NewId()
self._menu.Append(self._allId, "All", "Select all axes", False)
self._menu.Append(self._invertId, "Invert", "Invert axes selected",
False)
self._menu.AppendSeparator()
self.Bind(wx.EVT_BUTTON, self._onMenuButton, id=_NTB_AXISMENU_BUTTON)
self.Bind(wx.EVT_MENU, self._handleSelectAllAxes, id=self._allId)
self.Bind(wx.EVT_MENU, self._handleInvertAxesSelected,
id=self._invertId)
def Destroy(self):
self._menu.Destroy()
self.Destroy()
def _onMenuButton(self, evt):
"""Handle menu button pressed."""
if wxc.is_phoenix:
x, y = self.GetPosition()
w, h = self.GetSize()
else:
x, y = self.GetPositionTuple()
w, h = self.GetSizeTuple()
self.PopupMenuXY(self._menu, x, y + h - 4)
# When menu returned, indicate selection in button
evt.Skip()
def _handleSelectAllAxes(self, evt):
"""Called when the 'select all axes' menu item is selected."""
if len(self._axisId) == 0:
return
for i in range(len(self._axisId)):
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _handleInvertAxesSelected(self, evt):
"""Called when the invert all menu item is selected"""
if len(self._axisId) == 0:
return
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
self._menu.Check(self._axisId[i], False)
else:
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _onMenuItemSelected(self, evt):
"""Called whenever one of the specific axis menu items is selected"""
current = self._menu.IsChecked(evt.GetId())
if current:
new = False
else:
new = True
self._menu.Check(evt.GetId(), new)
# Lines above would be deleted based on svn tracker ID 2841525;
# not clear whether this matters or not.
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def updateAxes(self, maxAxis):
"""Ensures that there are entries for max_axis axes in the menu
(selected by default)."""
if maxAxis > len(self._axisId):
for i in range(len(self._axisId) + 1, maxAxis + 1, 1):
menuId = wx.NewId()
self._axisId.append(menuId)
self._menu.Append(menuId, "Axis %d" % i,
"Select axis %d" % i,
True)
self._menu.Check(menuId, True)
self.Bind(wx.EVT_MENU, self._onMenuItemSelected, id=menuId)
elif maxAxis < len(self._axisId):
for menuId in self._axisId[maxAxis:]:
self._menu.Delete(menuId)
self._axisId = self._axisId[:maxAxis]
self._toolbar.set_active(list(xrange(maxAxis)))
def getActiveAxes(self):
"""Return a list of the selected axes."""
active = []
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
active.append(i)
return active
def updateButtonText(self, lst):
"""Update the list of selected axes in the menu button"""
axis_txt = ''
for e in lst:
axis_txt += '%d,' % (e + 1)
# remove trailing ',' and add to button string
self.SetLabel("Axes: %s" % axis_txt[:-1])
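# Hedged usage sketch (assumed, not part of the original source): the toolbar
# creates one MenuButtonWx and keeps its menu in step with the number of axes
# in the figure, roughly:
#
#     button = MenuButtonWx(toolbar)
#     button.updateAxes(len(figure.get_axes()))  # one checkable entry per axes
#     active = button.getActiveAxes()            # e.g. [0, 1]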
cursord = {
cursors.MOVE: wx.CURSOR_HAND,
cursors.HAND: wx.CURSOR_HAND,
cursors.POINTER: wx.CURSOR_ARROW,
cursors.SELECT_REGION: wx.CURSOR_CROSS,
}
class SubplotToolWX(wx.Frame):
def __init__(self, targetfig):
wx.Frame.__init__(self, None, -1, "Configure subplots")
toolfig = Figure((6, 3))
canvas = FigureCanvasWx(self, -1, toolfig)
# Create a figure manager to manage things
figmgr = FigureManager(canvas, 1, self)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(sizer)
self.Fit()
tool = SubplotTool(targetfig, toolfig)
class NavigationToolbar2Wx(NavigationToolbar2, wx.ToolBar):
def __init__(self, canvas):
wx.ToolBar.__init__(self, canvas.GetParent(), -1)
NavigationToolbar2.__init__(self, canvas)
self.canvas = canvas
self._idle = True
self.statbar = None
self.prevZoomRect = None
# for now, use alternate zoom-rectangle drawing on all
# Macs. N.B. In future versions of wx it may be possible to
# detect Retina displays with window.GetContentScaleFactor()
# and/or dc.GetContentScaleFactor()
self.retinaFix = 'wxMac' in wx.PlatformInfo
def get_canvas(self, frame, fig):
return FigureCanvasWx(frame, -1, fig)
def _init_toolbar(self):
DEBUG_MSG("_init_toolbar", 1, self)
self._parent = self.canvas.GetParent()
self.wx_ids = {}
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.AddSeparator()
continue
self.wx_ids[text] = wx.NewId()
wxc._AddTool(self, self.wx_ids, text,
_load_bitmap(image_file + '.png'),
tooltip_text)
self.Bind(wx.EVT_TOOL, getattr(self, callback),
id=self.wx_ids[text])
self.Realize()
def zoom(self, *args):
self.ToggleTool(self.wx_ids['Pan'], False)
NavigationToolbar2.zoom(self, *args)
def pan(self, *args):
self.ToggleTool(self.wx_ids['Zoom'], False)
NavigationToolbar2.pan(self, *args)
def configure_subplots(self, evt):
frame = wx.Frame(None, -1, "Configure subplots")
toolfig = Figure((6, 3))
canvas = self.get_canvas(frame, toolfig)
# Create a figure manager to manage things
figmgr = FigureManager(canvas, 1, frame)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
frame.SetSizer(sizer)
frame.Fit()
tool = SubplotTool(self.canvas.figure, toolfig)
frame.Show()
def save_figure(self, *args):
# Fetch the required filename and file type.
filetypes, exts, filter_index = self.canvas._get_imagesave_wildcards()
default_file = self.canvas.get_default_filename()
dlg = wx.FileDialog(self._parent, "Save to file", "", default_file,
filetypes,
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
dlg.SetFilterIndex(filter_index)
if dlg.ShowModal() == wx.ID_OK:
dirname = dlg.GetDirectory()
filename = dlg.GetFilename()
DEBUG_MSG(
'Save file dir:%s name:%s' %
(dirname, filename), 3, self)
format = exts[dlg.GetFilterIndex()]
basename, ext = os.path.splitext(filename)
if ext.startswith('.'):
ext = ext[1:]
if ext in ('svg', 'pdf', 'ps', 'eps', 'png') and format != ext:
# looks like they forgot to set the image type drop
# down, going with the extension.
warnings.warn(
'extension %s did not match the selected '
'image type %s; going with %s' %
(ext, format, ext), stacklevel=0)
format = ext
try:
self.canvas.print_figure(
os.path.join(dirname, filename), format=format)
except Exception as e:
error_msg_wx(str(e))
def set_cursor(self, cursor):
cursor = wxc.Cursor(cursord[cursor])
self.canvas.SetCursor(cursor)
def release(self, event):
try:
del self.lastrect
except AttributeError:
pass
def dynamic_update(self):
d = self._idle
self._idle = False
if d:
self.canvas.draw()
self._idle = True
def press(self, event):
if self._active == 'ZOOM':
if not self.retinaFix:
self.wxoverlay = wx.Overlay()
else:
self.savedRetinaImage = self.canvas.copy_from_bbox(
self.canvas.figure.gca().bbox)
self.zoomStartX = event.xdata
self.zoomStartY = event.ydata
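    # NB: this second release() definition shadows the simpler one defined
    # above; in a class body the later definition wins, so only this
    # ZOOM-aware version is actually bound to the toolbar.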
def release(self, event):
if self._active == 'ZOOM':
# When the mouse is released we reset the overlay and it
# restores the former content to the window.
if not self.retinaFix:
self.wxoverlay.Reset()
del self.wxoverlay
else:
del self.savedRetinaImage
if self.prevZoomRect:
self.prevZoomRect.pop(0).remove()
self.prevZoomRect = None
def draw_rubberband(self, event, x0, y0, x1, y1):
if self.retinaFix: # On Macs, use the following code
# wx.DCOverlay does not work properly on Retina displays.
rubberBandColor = '#C0C0FF'
if self.prevZoomRect:
self.prevZoomRect.pop(0).remove()
self.canvas.restore_region(self.savedRetinaImage)
X0, X1 = self.zoomStartX, event.xdata
Y0, Y1 = self.zoomStartY, event.ydata
lineX = (X0, X0, X1, X1, X0)
lineY = (Y0, Y1, Y1, Y0, Y0)
self.prevZoomRect = self.canvas.figure.gca().plot(
lineX, lineY, '-', color=rubberBandColor)
self.canvas.figure.gca().draw_artist(self.prevZoomRect[0])
self.canvas.blit(self.canvas.figure.gca().bbox)
return
# Use an Overlay to draw a rubberband-like bounding box.
dc = wx.ClientDC(self.canvas)
odc = wx.DCOverlay(self.wxoverlay, dc)
odc.Clear()
# Mac's DC is already the same as a GCDC, and it causes
# problems with the overlay if we try to use an actual
# wx.GCDC so don't try it.
if 'wxMac' not in wx.PlatformInfo:
dc = wx.GCDC(dc)
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
if y1 < y0:
y0, y1 = y1, y0
        if x1 < x0:
x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = wx.Rect(x0, y0, w, h)
rubberBandColor = '#C0C0FF' # or load from config?
# Set a pen for the border
color = wxc.NamedColour(rubberBandColor)
dc.SetPen(wx.Pen(color, 1))
# use the same color, plus alpha for the brush
r, g, b, a = color.Get(True)
color.Set(r, g, b, 0x60)
dc.SetBrush(wx.Brush(color))
if wxc.is_phoenix:
dc.DrawRectangle(rect)
else:
dc.DrawRectangleRect(rect)
def set_status_bar(self, statbar):
self.statbar = statbar
def set_message(self, s):
if self.statbar is not None:
self.statbar.set_function(s)
def set_history_buttons(self):
can_backward = (self._views._pos > 0)
can_forward = (self._views._pos < len(self._views._elements) - 1)
self.EnableTool(self.wx_ids['Back'], can_backward)
self.EnableTool(self.wx_ids['Forward'], can_forward)
class StatusBarWx(wx.StatusBar):
"""
A status bar is added to _FigureFrame to allow measurements and the
previously selected scroll function to be displayed as a user
convenience.
"""
def __init__(self, parent):
wx.StatusBar.__init__(self, parent, -1)
self.SetFieldsCount(2)
self.SetStatusText("None", 1)
#self.SetStatusText("Measurement: None", 2)
# self.Reposition()
def set_function(self, string):
self.SetStatusText("%s" % string, 1)
# def set_measurement(self, string):
# self.SetStatusText("Measurement: %s" % string, 2)
#< Additions for printing support: Matt Newville
class PrintoutWx(wx.Printout):
"""
Simple wrapper around wx Printout class -- all the real work
here is scaling the matplotlib canvas bitmap to the current
printer's definition.
"""
def __init__(self, canvas, width=5.5, margin=0.5, title='matplotlib'):
wx.Printout.__init__(self, title=title)
self.canvas = canvas
# width, in inches of output figure (approximate)
self.width = width
self.margin = margin
def HasPage(self, page):
        # currently only supports printing a single page
return page == 1
def GetPageInfo(self):
return (1, 1, 1, 1)
def OnPrintPage(self, page):
self.canvas.draw()
dc = self.GetDC()
(ppw, pph) = self.GetPPIPrinter() # printer's pixels per in
(pgw, pgh) = self.GetPageSizePixels() # page size in pixels
(dcw, dch) = dc.GetSize()
if wxc.is_phoenix:
(grw, grh) = self.canvas.GetSize()
else:
(grw, grh) = self.canvas.GetSizeTuple()
# save current figure dpi resolution and bg color,
# so that we can temporarily set them to the dpi of
# the printer, and the bg color to white
bgcolor = self.canvas.figure.get_facecolor()
fig_dpi = self.canvas.figure.dpi
# draw the bitmap, scaled appropriately
vscale = float(ppw) / fig_dpi
# set figure resolution,bg color for printer
self.canvas.figure.dpi = ppw
self.canvas.figure.set_facecolor('#FFFFFF')
renderer = RendererWx(self.canvas.bitmap, self.canvas.figure.dpi)
self.canvas.figure.draw(renderer)
self.canvas.bitmap.SetWidth(
int(self.canvas.bitmap.GetWidth() * vscale))
self.canvas.bitmap.SetHeight(
int(self.canvas.bitmap.GetHeight() * vscale))
self.canvas.draw()
# page may need additional scaling on preview
page_scale = 1.0
if self.IsPreview():
page_scale = float(dcw) / pgw
# get margin in pixels = (margin in in) * (pixels/in)
top_margin = int(self.margin * pph * page_scale)
left_margin = int(self.margin * ppw * page_scale)
# set scale so that width of output is self.width inches
# (assuming grw is size of graph in inches....)
user_scale = (self.width * fig_dpi * page_scale) / float(grw)
dc.SetDeviceOrigin(left_margin, top_margin)
dc.SetUserScale(user_scale, user_scale)
        # this cute little number avoids API inconsistencies in wx
try:
dc.DrawBitmap(self.canvas.bitmap, 0, 0)
except:
try:
dc.DrawBitmap(self.canvas.bitmap, (0, 0))
except:
pass
# restore original figure resolution
self.canvas.figure.set_facecolor(bgcolor)
self.canvas.figure.dpi = fig_dpi
self.canvas.draw()
return True
#>
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureCanvas = FigureCanvasWx
FigureManager = FigureManagerWx
Toolbar = NavigationToolbar2Wx
| gpl-3.0 |
JackWalpole/splitwavepy | splitwavepy/eigval/eigval3d.py | 1 | 4907 | # -*- coding: utf-8 -*-
"""
The eigenvalue method of Silver and Chan (1991)
Low level routines work on numpy arrays and shift by samples (they know nothing about time)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..core import core, core3d, geom
from ..core.window import Window
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal, stats
# Silver and Chan in 3-dimensions
# 3 eigenvalues
# maximize ratio: lam1 / (lam2 * lam3)
# explore parameter space by iterating:
# 3-rotate to fast direction, slow-direction, shear-front normal.
# algorithm:
# 1) guess shear-front normal direction.
# 2) correct data
# 3) rotate
# another parameter:
# ray direction (lam3)
# maximise: lam1/(lam2-lam3)
# ray direction (eigvec3), phi (eigvec1), slow (eigvec2), dt
# in practice there is noise and we can adjust these ratios to scale with SNR
# 2-D maximise (lam1-lam2/lam2) in 3-D?
# lam1 + noise / lam2 + lam3 = signal + noise / noise - noise
# multi-windowed splitting
def eigvalcov(data):
"""
    return eigenvalues of the covariance matrix sorted in ascending order
    (smallest first), so 3-component data unpacks as lam3, lam2, lam1
"""
return np.sort(np.linalg.eigvalsh(np.cov(data)))
def eigcov(data):
"""
Return eigen values and vectors of covariance matrix
"""
eigenValues, eigenVectors = np.linalg.eig(np.cov(data))
idx = eigenValues.argsort()[::-1]
eigenValues = eigenValues[idx]
eigenVectors = eigenVectors[:,idx]
return eigenValues, eigenVectors
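# Hedged sketch (not in the original file): both helpers act on a
# (3, nsamples) array of components.  eigcov sorts eigenvalues in descending
# order, eigvalcov in ascending order, so the two agree up to a reversal:
#
#     >>> import numpy as np
#     >>> xyz = np.random.randn(3, 500)
#     >>> vals, vecs = eigcov(xyz)          # vals[0] >= vals[1] >= vals[2]
#     >>> np.allclose(np.sort(vals), eigvalcov(xyz))
#     True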
def vstack(x,y,z):
return np.vstack((x,y,z))
def grideigval(x, y, z, degs, slags, window, **kwargs):
"""
Grid search for splitting parameters applied to data.
    slags = 1-D array of sample shifts (lags) to search over, if None an attempt at finding sensible values is made
degs = 1-D array of rotations to search over, if None an attempt at finding sensible values is made
window = Window object (if None will guess an appropriate window)
rcvcorr = receiver correction parameters in tuple (fast,lag)
srccorr = source correction parameters in tuple (fast,lag)
"""
# grid of degs and lags to search over
degs, lags = np.meshgrid(degs,slags)
shape = degs.shape
lam1 = np.zeros(shape)
lam2 = np.zeros(shape)
lam3 = np.zeros(shape)
v1 = np.zeros(shape + (3,))
v2 = np.zeros(shape + (3,))
v3 = np.zeros(shape + (3,))
# avoid using "dots" in loops for performance
rotate = core3d.rotate
lag = core3d.lag
chop = core3d.chop
# pre-apply receiver correction
if 'rcvcorr' in kwargs:
x,y,z = core3d.unsplit(x,y,z,*kwargs['rcvcorr'])
# make function to do source correction (used in loop)
if 'srccorr' in kwargs:
srcphi, srclag = kwargs['srccorr']
def srccorr(x,y,z,ang):
# unwind rotation
x,y,z = rotate(x,y,z,srcphi-ang)
# remove splitting
x,y,z = lag(x,y,z,-srclag)
return x,y,z
else:
def srccorr(x,y,z,ang):
# no source correction so do nothing
return x,y,z
for ii in np.arange(shape[1]):
tx, ty, tz = rotate(x,y,z,degs[0,ii])
for jj in np.arange(shape[0]):
# remove splitting so use inverse operator (negative lag)
ux, uy, uz = lag( tx, ty, tz, -lags[jj,ii])
# if requested -- post-apply source correction
ux, uy, uz = srccorr( ux, uy, uz, degs[0,ii])
# chop to analysis window
ux, uy, uz = chop( ux, uy, uz, window=window)
# measure eigenvalues of covariance matrix
lam3[jj,ii], lam2[jj,ii], lam1[jj,ii] = eigvalcov(np.vstack((ux,uy,uz)))
return degs,lags,lam1,lam2,lam3
def ndf(y,window=None,detrend=False):
"""
Estimates number of degrees of freedom using noise trace y.
Uses the improvement found by Walsh et al (2013).
"""
if detrend is True:
# ensure no trend on the noise trace
y = signal.detrend(y)
if window is not None:
# chop trace to window limits
y = core.chop(y,window=window)
Y = np.fft.fft(y)
amp = np.absolute(Y)
# estimate E2 and E4 following Walsh et al (2013)
a = np.ones(Y.size)
a[0] = a[-1] = 0.5
E2 = np.sum( a * amp**2)
E4 = (np.sum( (4 * a**2 / 3) * amp**4))
ndf = 2 * ( 2 * E2**2 / E4 - 1 )
return ndf
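# Hedged example (assumption, not part of the original module): for an
# uncorrelated Gaussian noise trace the Walsh et al (2013) estimate is of the
# order of the number of samples, while band-limited noise gives fewer
# effective degrees of freedom:
#
#     >>> import numpy as np
#     >>> dof = ndf(np.random.randn(1000), detrend=True)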
def ftest(lam2,ndf,alpha=0.05):
"""
returns lambda2 value at 100(1-alpha)% confidence interval
by default alpha = 0.05 = 95% confidence interval
following Silver and Chan (1991)
"""
lam2min = lam2.min()
k = 2 # two parameters, phi and dt.
# R = ((lam2 - lam2min)/k) / (lam2min/(ndf-k))
F = stats.f.ppf(1-alpha,k,ndf)
lam2alpha = lam2min * ( 1 + (k/(ndf-k)) * F)
return lam2alpha
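# Hedged end-to-end sketch (assumed workflow; x, y, z, the search grids,
# window and noise_trace are hypothetical inputs): grideigval, ndf and ftest
# combine to give a 95% confidence level on the lam2 surface:
#
#     degs, lags, lam1, lam2, lam3 = grideigval(x, y, z, degs_grid, slags_grid, window)
#     dof = ndf(noise_trace, window=window)
#     lam2_95 = ftest(lam2, dof, alpha=0.05)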
| mit |
idealabasu/code_pynamics | python/pynamics_examples/old/pendulum1_mod.py | 1 | 2499 | # -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""
from math import pi
#import sympy
from danamics import *
#import matplotlib.pyplot as plt
#from rungekutta import *
#from testcode import *
import numpy
import scipy
import scipy.integrate
from tictoc import *
import matplotlib.pyplot as plt
#import readstl
#===============================================================================
system = dynsystem()
constant('lA',.04,system)
constant('lB',.04,system)
constant('g',9.81,system)
constant('mA',.0145,system)
constant('zero',0,system)
constant('Ixx_A',8.6e-007,system)
constant('Iyy_A',2.2e-006,system)
constant('Izz_A',2.2e-006,system)
constant('b',0.00001,system)
constant('k',0.0,system)
sympy.Symbol('fx')
accelerationvariable('qA',system)
accelerationvariable('qB',system)
accelerationvariable('qC',system)
accelerationvariable('qD',system)
accelerationvariable('qE',system)
initialvalues = {}
initialvalues[qA]=30*pi/180
initialvalues[qA_d]=0*pi/180
statevariables = system.q+system.q_d
ini = [item.subs(initialvalues) for item in statevariables]
frame('N',system)
frame('A',system)
frame('B',system)
frame('C',system)
frame('D',system)
frame('E',system)
N.setnewtonian()
A.RotateBodyZ(N,qA)
B.RotateBodyY(A,qB)
C.RotateBodyZ(B,qC)
D.RotateBodyZ(C,qD)
E.RotateBodyX(D,qE)
pos = C.x
tic()
v = vectorderivative(E.x,N,system)
toc()
tic()
v = vectorderivative(E.x,N,system)
toc()
print(v)
#
##A.setpathtonewtonian(['A','N'])
#
#pNA=zero*N.x+zero*N.y+zero*N.z
#pAB=pNA+lA*A.x
#pAcm=pNA+lA/2*A.x
#
#wNA = angularvelocityN(N,A)
#
#BodyA = body('BodyA',A,pAcm,mA,I_generic(A,Ixx_A,Iyy_A,Izz_A))
#
#system.addforce(-b*wNA,wNA)
#system.addforce(-k*qA*N.z,wNA)
#system.addforcegravity(-g*N.y)
#
#t = scipy.arange(0,10,.01)
#
#tic()
#print 'solving dynamics...'
#var_dd = solvedynamics('LU',False)
#toc()
#print 'integrating...'
#var_dd=var_dd.subs(system.constants)
#func1 = createsecondorderfunction(var_dd,statevariables,system.q_d,func_format = 'odeint')
#states=scipy.integrate.odeint(func1,ini,t,rtol=1e-8,atol=1e-8)
#toc()
#print 'calculating outputs..'
#x1 = dot(BodyA.pCM,N.x)
#y1 = dot(BodyA.pCM,N.y)
#KE = system.KE
#PE = system.getPEGravity(pNA)
#outputs = outputclass([x1,y1,KE,PE])
#outputs.calc(statevariables,states)
#toc()
#
#plt.figure(1)
#plt.plot(outputs(x1),outputs(y1))
##plt.plot(outputs_array[:,6],outputs_array[:,7])
#
#plt.figure(2)
#plt.plot(t,outputs(KE)-outputs(PE))
#plt.show()
| mit |
sargas/scipy | doc/source/tutorial/examples/newton_krylov_preconditioning.py | 99 | 2489 | import numpy as np
from scipy.optimize import root
from scipy.sparse import spdiags, kron
from scipy.sparse.linalg import spilu, LinearOperator
from numpy import cosh, zeros_like, mgrid, zeros, eye
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def get_preconditioner():
"""Compute the preconditioner M"""
diags_x = zeros((3, nx))
diags_x[0,:] = 1/hx/hx
diags_x[1,:] = -2/hx/hx
diags_x[2,:] = 1/hx/hx
Lx = spdiags(diags_x, [-1,0,1], nx, nx)
diags_y = zeros((3, ny))
diags_y[0,:] = 1/hy/hy
diags_y[1,:] = -2/hy/hy
diags_y[2,:] = 1/hy/hy
Ly = spdiags(diags_y, [-1,0,1], ny, ny)
J1 = kron(Lx, eye(ny)) + kron(eye(nx), Ly)
# Now we have the matrix `J_1`. We need to find its inverse `M` --
# however, since an approximate inverse is enough, we can use
# the *incomplete LU* decomposition
J1_ilu = spilu(J1)
# This returns an object with a method .solve() that evaluates
# the corresponding matrix-vector product. We need to wrap it into
# a LinearOperator before it can be passed to the Krylov methods:
M = LinearOperator(shape=(nx*ny, nx*ny), matvec=J1_ilu.solve)
return M
def solve(preconditioning=True):
"""Compute the solution"""
count = [0]
def residual(P):
count[0] += 1
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2])/hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y + 5*cosh(P).mean()**2
# preconditioner
if preconditioning:
M = get_preconditioner()
else:
M = None
# solve
guess = zeros((nx, ny), float)
sol = root(residual, guess, method='krylov',
options={'disp': True,
'jac_options': {'inner_M': M}})
    print('Residual', abs(residual(sol.x)).max())
    print('Evaluations', count[0])
return sol.x
def main():
sol = solve(preconditioning=True)
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.clf()
plt.pcolor(x, y, sol)
plt.clim(0, 1)
plt.colorbar()
plt.show()
if __name__ == "__main__":
main()
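# Hedged comparison sketch (assumption, not part of the original example):
# the benefit of the preconditioner shows up in the printed evaluation
# counts when the same problem is re-run without it.
#
#     sol_prec = solve(preconditioning=True)
#     sol_noprec = solve(preconditioning=False)   # usually many more evaluations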
| bsd-3-clause |
majetideepak/arrow | python/pyarrow/tests/strategies.py | 1 | 8245 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytz
import hypothesis as h
import hypothesis.strategies as st
import hypothesis.extra.numpy as npst
import hypothesis.extra.pytz as tzst
import numpy as np
import pyarrow as pa
# TODO(kszucs): alphanum_text, surrogate_text
custom_text = st.text(
alphabet=st.characters(
min_codepoint=0x41,
max_codepoint=0x7E
)
)
null_type = st.just(pa.null())
bool_type = st.just(pa.bool_())
binary_type = st.just(pa.binary())
string_type = st.just(pa.string())
large_binary_type = st.just(pa.large_binary())
large_string_type = st.just(pa.large_string())
signed_integer_types = st.sampled_from([
pa.int8(),
pa.int16(),
pa.int32(),
pa.int64()
])
unsigned_integer_types = st.sampled_from([
pa.uint8(),
pa.uint16(),
pa.uint32(),
pa.uint64()
])
integer_types = st.one_of(signed_integer_types, unsigned_integer_types)
floating_types = st.sampled_from([
pa.float16(),
pa.float32(),
pa.float64()
])
decimal_type = st.builds(
pa.decimal128,
precision=st.integers(min_value=1, max_value=38),
scale=st.integers(min_value=1, max_value=38)
)
numeric_types = st.one_of(integer_types, floating_types, decimal_type)
date_types = st.sampled_from([
pa.date32(),
pa.date64()
])
time_types = st.sampled_from([
pa.time32('s'),
pa.time32('ms'),
pa.time64('us'),
pa.time64('ns')
])
timestamp_types = st.builds(
pa.timestamp,
unit=st.sampled_from(['s', 'ms', 'us', 'ns']),
tz=tzst.timezones()
)
temporal_types = st.one_of(date_types, time_types, timestamp_types)
primitive_types = st.one_of(
null_type,
bool_type,
binary_type,
string_type,
large_binary_type,
large_string_type,
numeric_types,
temporal_types
)
metadata = st.dictionaries(st.text(), st.text())
def fields(type_strategy=primitive_types):
return st.builds(pa.field, name=custom_text, type=type_strategy,
nullable=st.booleans(), metadata=metadata)
def list_types(item_strategy=primitive_types):
return (
st.builds(pa.list_, item_strategy) |
st.builds(pa.large_list, item_strategy)
)
def struct_types(item_strategy=primitive_types):
return st.builds(pa.struct, st.lists(fields(item_strategy)))
def complex_types(inner_strategy=primitive_types):
return list_types(inner_strategy) | struct_types(inner_strategy)
def nested_list_types(item_strategy=primitive_types, max_leaves=3):
return st.recursive(item_strategy, list_types, max_leaves=max_leaves)
def nested_struct_types(item_strategy=primitive_types, max_leaves=3):
return st.recursive(item_strategy, struct_types, max_leaves=max_leaves)
def nested_complex_types(inner_strategy=primitive_types, max_leaves=3):
return st.recursive(inner_strategy, complex_types, max_leaves=max_leaves)
def schemas(type_strategy=primitive_types, max_fields=None):
children = st.lists(fields(type_strategy), max_size=max_fields)
return st.builds(pa.schema, children)
complex_schemas = schemas(complex_types())
all_types = st.one_of(primitive_types, complex_types(), nested_complex_types())
all_fields = fields(all_types)
all_schemas = schemas(all_types)
_default_array_sizes = st.integers(min_value=0, max_value=20)
@st.composite
def arrays(draw, type, size=None):
if isinstance(type, st.SearchStrategy):
type = draw(type)
elif not isinstance(type, pa.DataType):
raise TypeError('Type must be a pyarrow DataType')
if isinstance(size, st.SearchStrategy):
size = draw(size)
elif size is None:
size = draw(_default_array_sizes)
elif not isinstance(size, int):
raise TypeError('Size must be an integer')
shape = (size,)
if pa.types.is_list(type) or pa.types.is_large_list(type):
offsets = draw(npst.arrays(np.uint8(), shape=shape)).cumsum() // 20
offsets = np.insert(offsets, 0, 0, axis=0) # prepend with zero
values = draw(arrays(type.value_type, size=int(offsets.sum())))
array_type = (
pa.LargeListArray if pa.types.is_large_list(type)
else pa.ListArray)
return array_type.from_arrays(offsets, values)
if pa.types.is_struct(type):
h.assume(len(type) > 0)
fields, child_arrays = [], []
for field in type:
fields.append(field)
child_arrays.append(draw(arrays(field.type, size=size)))
return pa.StructArray.from_arrays(child_arrays, fields=fields)
if (pa.types.is_boolean(type) or pa.types.is_integer(type) or
pa.types.is_floating(type)):
values = npst.arrays(type.to_pandas_dtype(), shape=(size,))
np_arr = draw(values)
if pa.types.is_floating(type):
# Workaround ARROW-4952: no easy way to assert array equality
# in a NaN-tolerant way.
np_arr[np.isnan(np_arr)] = -42.0
return pa.array(np_arr, type=type)
if pa.types.is_null(type):
value = st.none()
elif pa.types.is_time(type):
value = st.times()
elif pa.types.is_date(type):
value = st.dates()
elif pa.types.is_timestamp(type):
tz = pytz.timezone(type.tz) if type.tz is not None else None
value = st.datetimes(timezones=st.just(tz))
elif pa.types.is_binary(type) or pa.types.is_large_binary(type):
value = st.binary()
elif pa.types.is_string(type) or pa.types.is_large_string(type):
value = st.text()
elif pa.types.is_decimal(type):
# TODO(kszucs): properly limit the precision
# value = st.decimals(places=type.scale, allow_infinity=False)
h.reject()
else:
raise NotImplementedError(type)
values = st.lists(value, min_size=size, max_size=size)
return pa.array(draw(values), type=type)
@st.composite
def chunked_arrays(draw, type, min_chunks=0, max_chunks=None, chunk_size=None):
if isinstance(type, st.SearchStrategy):
type = draw(type)
# TODO(kszucs): remove it, field metadata is not kept
h.assume(not pa.types.is_struct(type))
chunk = arrays(type, size=chunk_size)
chunks = st.lists(chunk, min_size=min_chunks, max_size=max_chunks)
return pa.chunked_array(draw(chunks), type=type)
@st.composite
def record_batches(draw, type, rows=None, max_fields=None):
if isinstance(rows, st.SearchStrategy):
rows = draw(rows)
elif rows is None:
rows = draw(_default_array_sizes)
elif not isinstance(rows, int):
raise TypeError('Rows must be an integer')
schema = draw(schemas(type, max_fields=max_fields))
children = [draw(arrays(field.type, size=rows)) for field in schema]
    # TODO(kszucs): the names and schema arguments are not consistent with
    # Table.from_arrays' arguments
return pa.RecordBatch.from_arrays(children, names=schema)
@st.composite
def tables(draw, type, rows=None, max_fields=None):
if isinstance(rows, st.SearchStrategy):
rows = draw(rows)
elif rows is None:
rows = draw(_default_array_sizes)
elif not isinstance(rows, int):
raise TypeError('Rows must be an integer')
schema = draw(schemas(type, max_fields=max_fields))
children = [draw(arrays(field.type, size=rows)) for field in schema]
return pa.Table.from_arrays(children, schema=schema)
all_arrays = arrays(all_types)
all_chunked_arrays = chunked_arrays(all_types)
all_record_batches = record_batches(all_types)
all_tables = tables(all_types)
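# Hedged usage sketch (assumption, not part of this module): the strategies
# above are meant to be consumed by hypothesis tests, for example
#
#     @h.given(arrays(signed_integer_types, size=5))
#     def test_array_has_requested_length(arr):
#         assert len(arr) == 5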
| apache-2.0 |
vybstat/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
    The Laplacian matrix of a graph is sometimes referred to as the
    "Kirchhoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
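# Hedged example (not part of the original docstring): for the 4-node path
# graph used in single_source_shortest_path_length above, the dense Laplacian
# carries the node degrees on the diagonal and -1 per edge:
#
#     >>> import numpy as np
#     >>> G = np.array([[0, 1, 0, 0],
#     ...               [1, 0, 1, 0],
#     ...               [0, 1, 0, 1],
#     ...               [0, 0, 1, 0]], dtype=np.float64)
#     >>> graph_laplacian(G)
#     array([[ 1., -1.,  0.,  0.],
#            [-1.,  2., -1.,  0.],
#            [ 0., -1.,  2., -1.],
#            [ 0.,  0., -1.,  1.]])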
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
tkaitchuck/nupic | external/darwin64/lib/python2.6/site-packages/matplotlib/_mathtext_data.py | 69 | 57988 | """
font data tables for truetype and afm computer modern fonts
"""
# this dict maps symbol names to fontnames, glyphindex. To get the
# glyph index from the character code, you have to use get_charmap
"""
from matplotlib.ft2font import FT2Font
font = FT2Font('/usr/local/share/matplotlib/cmr10.ttf')
items = font.get_charmap().items()
items.sort()
for charcode, glyphind in items:
print charcode, glyphind
"""
latex_to_bakoma = {
r'\oint' : ('cmex10', 45),
r'\bigodot' : ('cmex10', 50),
r'\bigoplus' : ('cmex10', 55),
r'\bigotimes' : ('cmex10', 59),
r'\sum' : ('cmex10', 51),
r'\prod' : ('cmex10', 24),
r'\int' : ('cmex10', 56),
r'\bigcup' : ('cmex10', 28),
r'\bigcap' : ('cmex10', 60),
r'\biguplus' : ('cmex10', 32),
r'\bigwedge' : ('cmex10', 4),
r'\bigvee' : ('cmex10', 37),
r'\coprod' : ('cmex10', 42),
r'\__sqrt__' : ('cmex10', 48),
r'\leftbrace' : ('cmex10', 92),
r'{' : ('cmex10', 92),
r'\{' : ('cmex10', 92),
r'\rightbrace' : ('cmex10', 130),
r'}' : ('cmex10', 130),
r'\}' : ('cmex10', 130),
r'\leftangle' : ('cmex10', 97),
r'\rightangle' : ('cmex10', 64),
r'\langle' : ('cmex10', 97),
r'\rangle' : ('cmex10', 64),
r'\widehat' : ('cmex10', 15),
r'\widetilde' : ('cmex10', 52),
r'\omega' : ('cmmi10', 29),
r'\varepsilon' : ('cmmi10', 20),
r'\vartheta' : ('cmmi10', 22),
r'\varrho' : ('cmmi10', 61),
r'\varsigma' : ('cmmi10', 41),
r'\varphi' : ('cmmi10', 6),
r'\leftharpoonup' : ('cmmi10', 108),
r'\leftharpoondown' : ('cmmi10', 68),
r'\rightharpoonup' : ('cmmi10', 117),
r'\rightharpoondown' : ('cmmi10', 77),
r'\triangleright' : ('cmmi10', 130),
r'\triangleleft' : ('cmmi10', 89),
r'.' : ('cmmi10', 51),
r',' : ('cmmi10', 44),
r'<' : ('cmmi10', 99),
r'/' : ('cmmi10', 98),
r'>' : ('cmmi10', 107),
r'\flat' : ('cmmi10', 131),
r'\natural' : ('cmmi10', 90),
r'\sharp' : ('cmmi10', 50),
r'\smile' : ('cmmi10', 97),
r'\frown' : ('cmmi10', 58),
r'\ell' : ('cmmi10', 102),
r'\imath' : ('cmmi10', 8),
r'\jmath' : ('cmmi10', 65),
r'\wp' : ('cmmi10', 14),
r'\alpha' : ('cmmi10', 13),
r'\beta' : ('cmmi10', 35),
r'\gamma' : ('cmmi10', 24),
r'\delta' : ('cmmi10', 38),
r'\epsilon' : ('cmmi10', 54),
r'\zeta' : ('cmmi10', 10),
r'\eta' : ('cmmi10', 5),
r'\theta' : ('cmmi10', 18),
r'\iota' : ('cmmi10', 28),
r'\lambda' : ('cmmi10', 9),
r'\mu' : ('cmmi10', 32),
r'\nu' : ('cmmi10', 34),
r'\xi' : ('cmmi10', 7),
r'\pi' : ('cmmi10', 36),
r'\kappa' : ('cmmi10', 30),
r'\rho' : ('cmmi10', 39),
r'\sigma' : ('cmmi10', 21),
r'\tau' : ('cmmi10', 43),
r'\upsilon' : ('cmmi10', 25),
r'\phi' : ('cmmi10', 42),
r'\chi' : ('cmmi10', 17),
r'\psi' : ('cmmi10', 31),
r'|' : ('cmsy10', 47),
r'\|' : ('cmsy10', 47),
r'(' : ('cmr10', 119),
r'\leftparen' : ('cmr10', 119),
r'\rightparen' : ('cmr10', 68),
r')' : ('cmr10', 68),
r'+' : ('cmr10', 76),
r'0' : ('cmr10', 40),
r'1' : ('cmr10', 100),
r'2' : ('cmr10', 49),
r'3' : ('cmr10', 110),
r'4' : ('cmr10', 59),
r'5' : ('cmr10', 120),
r'6' : ('cmr10', 69),
r'7' : ('cmr10', 127),
r'8' : ('cmr10', 77),
r'9' : ('cmr10', 22),
r':' : ('cmr10', 85),
r';' : ('cmr10', 31),
r'=' : ('cmr10', 41),
r'\leftbracket' : ('cmr10', 62),
r'[' : ('cmr10', 62),
r'\rightbracket' : ('cmr10', 72),
r']' : ('cmr10', 72),
r'\%' : ('cmr10', 48),
r'%' : ('cmr10', 48),
r'\$' : ('cmr10', 99),
r'@' : ('cmr10', 111),
r'\_' : ('cmtt10', 79),
r'\Gamma' : ('cmr10', 19),
r'\Delta' : ('cmr10', 6),
r'\Theta' : ('cmr10', 7),
r'\Lambda' : ('cmr10', 14),
r'\Xi' : ('cmr10', 3),
r'\Pi' : ('cmr10', 17),
r'\Sigma' : ('cmr10', 10),
r'\Upsilon' : ('cmr10', 11),
r'\Phi' : ('cmr10', 9),
r'\Psi' : ('cmr10', 15),
r'\Omega' : ('cmr10', 12),
# these are mathml names, I think. I'm just using them for the
# tex methods noted
r'\circumflexaccent' : ('cmr10', 124), # for \hat
r'\combiningbreve' : ('cmr10', 81), # for \breve
r'\combiningoverline' : ('cmr10', 131), # for \bar
r'\combininggraveaccent' : ('cmr10', 114), # for \grave
r'\combiningacuteaccent' : ('cmr10', 63), # for \accute
r'\combiningdiaeresis' : ('cmr10', 91), # for \ddot
r'\combiningtilde' : ('cmr10', 75), # for \tilde
r'\combiningrightarrowabove' : ('cmmi10', 110), # for \vec
r'\combiningdotabove' : ('cmr10', 26), # for \dot
r'\leftarrow' : ('cmsy10', 10),
r'\uparrow' : ('cmsy10', 25),
r'\downarrow' : ('cmsy10', 28),
r'\leftrightarrow' : ('cmsy10', 24),
r'\nearrow' : ('cmsy10', 99),
r'\searrow' : ('cmsy10', 57),
r'\simeq' : ('cmsy10', 108),
r'\Leftarrow' : ('cmsy10', 104),
r'\Rightarrow' : ('cmsy10', 112),
r'\Uparrow' : ('cmsy10', 60),
r'\Downarrow' : ('cmsy10', 68),
r'\Leftrightarrow' : ('cmsy10', 51),
r'\nwarrow' : ('cmsy10', 65),
r'\swarrow' : ('cmsy10', 116),
r'\propto' : ('cmsy10', 15),
r'\prime' : ('cmsy10', 73),
r"'" : ('cmsy10', 73),
r'\infty' : ('cmsy10', 32),
r'\in' : ('cmsy10', 59),
r'\ni' : ('cmsy10', 122),
r'\bigtriangleup' : ('cmsy10', 80),
r'\bigtriangledown' : ('cmsy10', 132),
r'\slash' : ('cmsy10', 87),
r'\forall' : ('cmsy10', 21),
r'\exists' : ('cmsy10', 5),
r'\neg' : ('cmsy10', 20),
r'\emptyset' : ('cmsy10', 33),
r'\Re' : ('cmsy10', 95),
r'\Im' : ('cmsy10', 52),
r'\top' : ('cmsy10', 100),
r'\bot' : ('cmsy10', 11),
r'\aleph' : ('cmsy10', 26),
r'\cup' : ('cmsy10', 6),
r'\cap' : ('cmsy10', 19),
r'\uplus' : ('cmsy10', 58),
r'\wedge' : ('cmsy10', 43),
r'\vee' : ('cmsy10', 96),
r'\vdash' : ('cmsy10', 109),
r'\dashv' : ('cmsy10', 66),
r'\lfloor' : ('cmsy10', 117),
r'\rfloor' : ('cmsy10', 74),
r'\lceil' : ('cmsy10', 123),
r'\rceil' : ('cmsy10', 81),
r'\lbrace' : ('cmsy10', 92),
r'\rbrace' : ('cmsy10', 105),
r'\mid' : ('cmsy10', 47),
r'\vert' : ('cmsy10', 47),
r'\Vert' : ('cmsy10', 44),
r'\updownarrow' : ('cmsy10', 94),
r'\Updownarrow' : ('cmsy10', 53),
r'\backslash' : ('cmsy10', 126),
r'\wr' : ('cmsy10', 101),
r'\nabla' : ('cmsy10', 110),
r'\sqcup' : ('cmsy10', 67),
r'\sqcap' : ('cmsy10', 118),
r'\sqsubseteq' : ('cmsy10', 75),
r'\sqsupseteq' : ('cmsy10', 124),
r'\S' : ('cmsy10', 129),
r'\dag' : ('cmsy10', 71),
r'\ddag' : ('cmsy10', 127),
r'\P' : ('cmsy10', 130),
r'\clubsuit' : ('cmsy10', 18),
r'\diamondsuit' : ('cmsy10', 34),
r'\heartsuit' : ('cmsy10', 22),
r'-' : ('cmsy10', 17),
r'\cdot' : ('cmsy10', 78),
r'\times' : ('cmsy10', 13),
r'*' : ('cmsy10', 9),
r'\ast' : ('cmsy10', 9),
r'\div' : ('cmsy10', 31),
r'\diamond' : ('cmsy10', 48),
r'\pm' : ('cmsy10', 8),
r'\mp' : ('cmsy10', 98),
r'\oplus' : ('cmsy10', 16),
r'\ominus' : ('cmsy10', 56),
r'\otimes' : ('cmsy10', 30),
r'\oslash' : ('cmsy10', 107),
r'\odot' : ('cmsy10', 64),
r'\bigcirc' : ('cmsy10', 115),
r'\circ' : ('cmsy10', 72),
r'\bullet' : ('cmsy10', 84),
r'\asymp' : ('cmsy10', 121),
r'\equiv' : ('cmsy10', 35),
r'\subseteq' : ('cmsy10', 103),
r'\supseteq' : ('cmsy10', 42),
r'\leq' : ('cmsy10', 14),
r'\geq' : ('cmsy10', 29),
r'\preceq' : ('cmsy10', 79),
r'\succeq' : ('cmsy10', 131),
r'\sim' : ('cmsy10', 27),
r'\approx' : ('cmsy10', 23),
r'\subset' : ('cmsy10', 50),
r'\supset' : ('cmsy10', 86),
r'\ll' : ('cmsy10', 85),
r'\gg' : ('cmsy10', 40),
r'\prec' : ('cmsy10', 93),
r'\succ' : ('cmsy10', 49),
r'\rightarrow' : ('cmsy10', 12),
r'\to' : ('cmsy10', 12),
r'\spadesuit' : ('cmsy10', 7),
}
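# Hedged lookup sketch (not part of the original table): a TeX symbol is
# resolved to a BaKoMa font name and glyph index by plain dict access, e.g.
#
#     fontname, glyphindex = latex_to_bakoma[r'\alpha']   # ('cmmi10', 13)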
latex_to_cmex = {
r'\__sqrt__' : 112,
r'\bigcap' : 92,
r'\bigcup' : 91,
r'\bigodot' : 75,
r'\bigoplus' : 77,
r'\bigotimes' : 79,
r'\biguplus' : 93,
r'\bigvee' : 95,
r'\bigwedge' : 94,
r'\coprod' : 97,
r'\int' : 90,
r'\leftangle' : 173,
r'\leftbrace' : 169,
r'\oint' : 73,
r'\prod' : 89,
r'\rightangle' : 174,
r'\rightbrace' : 170,
r'\sum' : 88,
r'\widehat' : 98,
r'\widetilde' : 101,
}
latex_to_standard = {
r'\cong' : ('psyr', 64),
r'\Delta' : ('psyr', 68),
r'\Phi' : ('psyr', 70),
r'\Gamma' : ('psyr', 89),
r'\alpha' : ('psyr', 97),
r'\beta' : ('psyr', 98),
r'\chi' : ('psyr', 99),
r'\delta' : ('psyr', 100),
r'\varepsilon' : ('psyr', 101),
r'\phi' : ('psyr', 102),
r'\gamma' : ('psyr', 103),
r'\eta' : ('psyr', 104),
r'\iota' : ('psyr', 105),
r'\varpsi' : ('psyr', 106),
r'\kappa' : ('psyr', 108),
r'\nu' : ('psyr', 110),
r'\pi' : ('psyr', 112),
r'\theta' : ('psyr', 113),
r'\rho' : ('psyr', 114),
r'\sigma' : ('psyr', 115),
r'\tau' : ('psyr', 116),
r'\upsilon' : ('psyr', 117),
r'\varpi' : ('psyr', 118),
r'\omega' : ('psyr', 119),
r'\xi' : ('psyr', 120),
r'\psi' : ('psyr', 121),
r'\zeta' : ('psyr', 122),
r'\sim' : ('psyr', 126),
r'\leq' : ('psyr', 163),
r'\infty' : ('psyr', 165),
r'\clubsuit' : ('psyr', 167),
r'\diamondsuit' : ('psyr', 168),
r'\heartsuit' : ('psyr', 169),
r'\spadesuit' : ('psyr', 170),
r'\leftrightarrow' : ('psyr', 171),
r'\leftarrow' : ('psyr', 172),
r'\uparrow' : ('psyr', 173),
r'\rightarrow' : ('psyr', 174),
r'\downarrow' : ('psyr', 175),
r'\pm' : ('psyr', 176),
r'\geq' : ('psyr', 179),
r'\times' : ('psyr', 180),
r'\propto' : ('psyr', 181),
r'\partial' : ('psyr', 182),
r'\bullet' : ('psyr', 183),
r'\div' : ('psyr', 184),
r'\neq' : ('psyr', 185),
r'\equiv' : ('psyr', 186),
r'\approx' : ('psyr', 187),
r'\ldots' : ('psyr', 188),
r'\aleph' : ('psyr', 192),
r'\Im' : ('psyr', 193),
r'\Re' : ('psyr', 194),
r'\wp' : ('psyr', 195),
r'\otimes' : ('psyr', 196),
r'\oplus' : ('psyr', 197),
r'\oslash' : ('psyr', 198),
r'\cap' : ('psyr', 199),
r'\cup' : ('psyr', 200),
r'\supset' : ('psyr', 201),
r'\supseteq' : ('psyr', 202),
r'\subset' : ('psyr', 204),
r'\subseteq' : ('psyr', 205),
r'\in' : ('psyr', 206),
r'\notin' : ('psyr', 207),
r'\angle' : ('psyr', 208),
r'\nabla' : ('psyr', 209),
r'\textregistered' : ('psyr', 210),
r'\copyright' : ('psyr', 211),
r'\texttrademark' : ('psyr', 212),
r'\Pi' : ('psyr', 213),
r'\prod' : ('psyr', 213),
r'\surd' : ('psyr', 214),
r'\__sqrt__' : ('psyr', 214),
r'\cdot' : ('psyr', 215),
r'\urcorner' : ('psyr', 216),
r'\vee' : ('psyr', 217),
r'\wedge' : ('psyr', 218),
r'\Leftrightarrow' : ('psyr', 219),
r'\Leftarrow' : ('psyr', 220),
r'\Uparrow' : ('psyr', 221),
r'\Rightarrow' : ('psyr', 222),
r'\Downarrow' : ('psyr', 223),
r'\Diamond' : ('psyr', 224),
r'\langle' : ('psyr', 225),
r'\Sigma' : ('psyr', 229),
r'\sum' : ('psyr', 229),
r'\forall' : ('psyr', 34),
r'\exists' : ('psyr', 36),
r'\lceil' : ('psyr', 233),
r'\lbrace' : ('psyr', 123),
r'\Psi' : ('psyr', 89),
r'\bot' : ('psyr', 0136),
r'\Omega' : ('psyr', 0127),
r'\leftbracket' : ('psyr', 0133),
r'\rightbracket' : ('psyr', 0135),
r'\leftbrace' : ('psyr', 123),
r'\leftparen' : ('psyr', 050),
r'\prime' : ('psyr', 0242),
r'\sharp' : ('psyr', 043),
r'\slash' : ('psyr', 057),
r'\Lamda' : ('psyr', 0114),
r'\neg' : ('psyr', 0330),
r'\Upsilon' : ('psyr', 0241),
r'\rightbrace' : ('psyr', 0175),
r'\rfloor' : ('psyr', 0373),
r'\lambda' : ('psyr', 0154),
r'\to' : ('psyr', 0256),
r'\Xi' : ('psyr', 0130),
r'\emptyset' : ('psyr', 0306),
r'\lfloor' : ('psyr', 0353),
r'\rightparen' : ('psyr', 051),
r'\rceil' : ('psyr', 0371),
r'\ni' : ('psyr', 047),
r'\epsilon' : ('psyr', 0145),
r'\Theta' : ('psyr', 0121),
r'\langle' : ('psyr', 0341),
r'\leftangle' : ('psyr', 0341),
r'\rangle' : ('psyr', 0361),
r'\rightangle' : ('psyr', 0361),
r'\rbrace' : ('psyr', 0175),
r'\circ' : ('psyr', 0260),
r'\diamond' : ('psyr', 0340),
r'\mu' : ('psyr', 0155),
r'\mid' : ('psyr', 0352),
r'\imath' : ('pncri8a', 105),
r'\%' : ('pncr8a', 37),
r'\$' : ('pncr8a', 36),
r'\{' : ('pncr8a', 123),
r'\}' : ('pncr8a', 125),
r'\backslash' : ('pncr8a', 92),
r'\ast' : ('pncr8a', 42),
r'\circumflexaccent' : ('pncri8a', 124), # for \hat
r'\combiningbreve' : ('pncri8a', 81), # for \breve
r'\combininggraveaccent' : ('pncri8a', 114), # for \grave
r'\combiningacuteaccent' : ('pncri8a', 63), # for \accute
r'\combiningdiaeresis' : ('pncri8a', 91), # for \ddot
r'\combiningtilde' : ('pncri8a', 75), # for \tilde
r'\combiningrightarrowabove' : ('pncri8a', 110), # for \vec
r'\combiningdotabove' : ('pncri8a', 26), # for \dot
}
# Automatically generated.
type12uni = {'uni24C8': 9416,
'aring': 229,
'uni22A0': 8864,
'uni2292': 8850,
'quotedblright': 8221,
'uni03D2': 978,
'uni2215': 8725,
'uni03D0': 976,
'V': 86,
'dollar': 36,
'uni301E': 12318,
'uni03D5': 981,
'four': 52,
'uni25A0': 9632,
'uni013C': 316,
'uni013B': 315,
'uni013E': 318,
'Yacute': 221,
'uni25DE': 9694,
'uni013F': 319,
'uni255A': 9562,
'uni2606': 9734,
'uni0180': 384,
'uni22B7': 8887,
'uni044F': 1103,
'uni22B5': 8885,
'uni22B4': 8884,
'uni22AE': 8878,
'uni22B2': 8882,
'uni22B1': 8881,
'uni22B0': 8880,
'uni25CD': 9677,
'uni03CE': 974,
'uni03CD': 973,
'uni03CC': 972,
'uni03CB': 971,
'uni03CA': 970,
'uni22B8': 8888,
'uni22C9': 8905,
'uni0449': 1097,
'uni20DD': 8413,
'uni20DC': 8412,
'uni20DB': 8411,
'uni2231': 8753,
'uni25CF': 9679,
'uni306E': 12398,
'uni03D1': 977,
'uni01A1': 417,
'uni20D7': 8407,
'uni03D6': 982,
'uni2233': 8755,
'uni20D2': 8402,
'uni20D1': 8401,
'uni20D0': 8400,
'P': 80,
'uni22BE': 8894,
'uni22BD': 8893,
'uni22BC': 8892,
'uni22BB': 8891,
'underscore': 95,
'uni03C8': 968,
'uni03C7': 967,
'uni0328': 808,
'uni03C5': 965,
'uni03C4': 964,
'uni03C3': 963,
'uni03C2': 962,
'uni03C1': 961,
'uni03C0': 960,
'uni2010': 8208,
'uni0130': 304,
'uni0133': 307,
'uni0132': 306,
'uni0135': 309,
'uni0134': 308,
'uni0137': 311,
'uni0136': 310,
'uni0139': 313,
'uni0138': 312,
'uni2244': 8772,
'uni229A': 8858,
'uni2571': 9585,
'uni0278': 632,
'uni2239': 8761,
'p': 112,
'uni3019': 12313,
'uni25CB': 9675,
'uni03DB': 987,
'uni03DC': 988,
'uni03DA': 986,
'uni03DF': 991,
'uni03DD': 989,
'uni013D': 317,
'uni220A': 8714,
'uni220C': 8716,
'uni220B': 8715,
'uni220E': 8718,
'uni220D': 8717,
'uni220F': 8719,
'uni22CC': 8908,
'Otilde': 213,
'uni25E5': 9701,
'uni2736': 10038,
'perthousand': 8240,
'zero': 48,
'uni279B': 10139,
'dotlessi': 305,
'uni2279': 8825,
'Scaron': 352,
'zcaron': 382,
'uni21D8': 8664,
'egrave': 232,
'uni0271': 625,
'uni01AA': 426,
'uni2332': 9010,
'section': 167,
'uni25E4': 9700,
'Icircumflex': 206,
'ntilde': 241,
'uni041E': 1054,
'ampersand': 38,
'uni041C': 1052,
'uni041A': 1050,
'uni22AB': 8875,
'uni21DB': 8667,
'dotaccent': 729,
'uni0416': 1046,
'uni0417': 1047,
'uni0414': 1044,
'uni0415': 1045,
'uni0412': 1042,
'uni0413': 1043,
'degree': 176,
'uni0411': 1041,
'K': 75,
'uni25EB': 9707,
'uni25EF': 9711,
'uni0418': 1048,
'uni0419': 1049,
'uni2263': 8803,
'uni226E': 8814,
'uni2251': 8785,
'uni02C8': 712,
'uni2262': 8802,
'acircumflex': 226,
'uni22B3': 8883,
'uni2261': 8801,
'uni2394': 9108,
'Aring': 197,
'uni2260': 8800,
'uni2254': 8788,
'uni0436': 1078,
'uni2267': 8807,
'k': 107,
'uni22C8': 8904,
'uni226A': 8810,
'uni231F': 8991,
'smalltilde': 732,
'uni2201': 8705,
'uni2200': 8704,
'uni2203': 8707,
'uni02BD': 701,
'uni2205': 8709,
'uni2204': 8708,
'Agrave': 192,
'uni2206': 8710,
'uni2209': 8713,
'uni2208': 8712,
'uni226D': 8813,
'uni2264': 8804,
'uni263D': 9789,
'uni2258': 8792,
'uni02D3': 723,
'uni02D2': 722,
'uni02D1': 721,
'uni02D0': 720,
'uni25E1': 9697,
'divide': 247,
'uni02D5': 725,
'uni02D4': 724,
'ocircumflex': 244,
'uni2524': 9508,
'uni043A': 1082,
'uni24CC': 9420,
'asciitilde': 126,
'uni22B9': 8889,
'uni24D2': 9426,
'uni211E': 8478,
'uni211D': 8477,
'uni24DD': 9437,
'uni211A': 8474,
'uni211C': 8476,
'uni211B': 8475,
'uni25C6': 9670,
'uni017F': 383,
'uni017A': 378,
'uni017C': 380,
'uni017B': 379,
'uni0346': 838,
'uni22F1': 8945,
'uni22F0': 8944,
'two': 50,
'uni2298': 8856,
'uni24D1': 9425,
'E': 69,
'uni025D': 605,
'scaron': 353,
'uni2322': 8994,
'uni25E3': 9699,
'uni22BF': 8895,
'F': 70,
'uni0440': 1088,
'uni255E': 9566,
'uni22BA': 8890,
'uni0175': 373,
'uni0174': 372,
'uni0177': 375,
'uni0176': 374,
'bracketleft': 91,
'uni0170': 368,
'uni0173': 371,
'uni0172': 370,
'asciicircum': 94,
'uni0179': 377,
'uni2590': 9616,
'uni25E2': 9698,
'uni2119': 8473,
'uni2118': 8472,
'uni25CC': 9676,
'f': 102,
'ordmasculine': 186,
'uni229B': 8859,
'uni22A1': 8865,
'uni2111': 8465,
'uni2110': 8464,
'uni2113': 8467,
'uni2112': 8466,
'mu': 181,
'uni2281': 8833,
'paragraph': 182,
'nine': 57,
'uni25EC': 9708,
'v': 118,
'uni040C': 1036,
'uni0113': 275,
'uni22D0': 8912,
'uni21CC': 8652,
'uni21CB': 8651,
'uni21CA': 8650,
'uni22A5': 8869,
'uni21CF': 8655,
'uni21CE': 8654,
'uni21CD': 8653,
'guilsinglleft': 8249,
'backslash': 92,
'uni2284': 8836,
'uni224E': 8782,
'uni224D': 8781,
'uni224F': 8783,
'uni224A': 8778,
'uni2287': 8839,
'uni224C': 8780,
'uni224B': 8779,
'uni21BD': 8637,
'uni2286': 8838,
'uni030F': 783,
'uni030D': 781,
'uni030E': 782,
'uni030B': 779,
'uni030C': 780,
'uni030A': 778,
'uni026E': 622,
'uni026D': 621,
'six': 54,
'uni026A': 618,
'uni026C': 620,
'uni25C1': 9665,
'uni20D6': 8406,
'uni045B': 1115,
'uni045C': 1116,
'uni256B': 9579,
'uni045A': 1114,
'uni045F': 1119,
'uni045E': 1118,
'A': 65,
'uni2569': 9577,
'uni0458': 1112,
'uni0459': 1113,
'uni0452': 1106,
'uni0453': 1107,
'uni2562': 9570,
'uni0451': 1105,
'uni0456': 1110,
'uni0457': 1111,
'uni0454': 1108,
'uni0455': 1109,
'icircumflex': 238,
'uni0307': 775,
'uni0304': 772,
'uni0305': 773,
'uni0269': 617,
'uni0268': 616,
'uni0300': 768,
'uni0301': 769,
'uni0265': 613,
'uni0264': 612,
'uni0267': 615,
'uni0266': 614,
'uni0261': 609,
'uni0260': 608,
'uni0263': 611,
'uni0262': 610,
'a': 97,
'uni2207': 8711,
'uni2247': 8775,
'uni2246': 8774,
'uni2241': 8769,
'uni2240': 8768,
'uni2243': 8771,
'uni2242': 8770,
'uni2312': 8978,
'ogonek': 731,
'uni2249': 8777,
'uni2248': 8776,
'uni3030': 12336,
'q': 113,
'uni21C2': 8642,
'uni21C1': 8641,
'uni21C0': 8640,
'uni21C7': 8647,
'uni21C6': 8646,
'uni21C5': 8645,
'uni21C4': 8644,
'uni225F': 8799,
'uni212C': 8492,
'uni21C8': 8648,
'uni2467': 9319,
'oacute': 243,
'uni028F': 655,
'uni028E': 654,
'uni026F': 623,
'uni028C': 652,
'uni028B': 651,
'uni028A': 650,
'uni2510': 9488,
'ograve': 242,
'edieresis': 235,
'uni22CE': 8910,
'uni22CF': 8911,
'uni219F': 8607,
'comma': 44,
'uni22CA': 8906,
'uni0429': 1065,
'uni03C6': 966,
'uni0427': 1063,
'uni0426': 1062,
'uni0425': 1061,
'uni0424': 1060,
'uni0423': 1059,
'uni0422': 1058,
'uni0421': 1057,
'uni0420': 1056,
'uni2465': 9317,
'uni24D0': 9424,
'uni2464': 9316,
'uni0430': 1072,
'otilde': 245,
'uni2661': 9825,
'uni24D6': 9430,
'uni2466': 9318,
'uni24D5': 9429,
'uni219A': 8602,
'uni2518': 9496,
'uni22B6': 8886,
'uni2461': 9313,
'uni24D4': 9428,
'uni2460': 9312,
'uni24EA': 9450,
'guillemotright': 187,
'ecircumflex': 234,
'greater': 62,
'uni2011': 8209,
'uacute': 250,
'uni2462': 9314,
'L': 76,
'bullet': 8226,
'uni02A4': 676,
'uni02A7': 679,
'cedilla': 184,
'uni02A2': 674,
'uni2015': 8213,
'uni22C4': 8900,
'uni22C5': 8901,
'uni22AD': 8877,
'uni22C7': 8903,
'uni22C0': 8896,
'uni2016': 8214,
'uni22C2': 8898,
'uni22C3': 8899,
'uni24CF': 9423,
'uni042F': 1071,
'uni042E': 1070,
'uni042D': 1069,
'ydieresis': 255,
'l': 108,
'logicalnot': 172,
'uni24CA': 9418,
'uni0287': 647,
'uni0286': 646,
'uni0285': 645,
'uni0284': 644,
'uni0283': 643,
'uni0282': 642,
'uni0281': 641,
'uni027C': 636,
'uni2664': 9828,
'exclamdown': 161,
'uni25C4': 9668,
'uni0289': 649,
'uni0288': 648,
'uni039A': 922,
'endash': 8211,
'uni2640': 9792,
'uni20E4': 8420,
'uni0473': 1139,
'uni20E1': 8417,
'uni2642': 9794,
'uni03B8': 952,
'uni03B9': 953,
'agrave': 224,
'uni03B4': 948,
'uni03B5': 949,
'uni03B6': 950,
'uni03B7': 951,
'uni03B0': 944,
'uni03B1': 945,
'uni03B2': 946,
'uni03B3': 947,
'uni2555': 9557,
'Adieresis': 196,
'germandbls': 223,
'Odieresis': 214,
'space': 32,
'uni0126': 294,
'uni0127': 295,
'uni0124': 292,
'uni0125': 293,
'uni0122': 290,
'uni0123': 291,
'uni0120': 288,
'uni0121': 289,
'quoteright': 8217,
'uni2560': 9568,
'uni2556': 9558,
'ucircumflex': 251,
'uni2561': 9569,
'uni2551': 9553,
'uni25B2': 9650,
'uni2550': 9552,
'uni2563': 9571,
'uni2553': 9555,
'G': 71,
'uni2564': 9572,
'uni2552': 9554,
'quoteleft': 8216,
'uni2565': 9573,
'uni2572': 9586,
'uni2568': 9576,
'uni2566': 9574,
'W': 87,
'uni214A': 8522,
'uni012F': 303,
'uni012D': 301,
'uni012E': 302,
'uni012B': 299,
'uni012C': 300,
'uni255C': 9564,
'uni012A': 298,
'uni2289': 8841,
'Q': 81,
'uni2320': 8992,
'uni2321': 8993,
'g': 103,
'uni03BD': 957,
'uni03BE': 958,
'uni03BF': 959,
'uni2282': 8834,
'uni2285': 8837,
'uni03BA': 954,
'uni03BB': 955,
'uni03BC': 956,
'uni2128': 8488,
'uni25B7': 9655,
'w': 119,
'uni0302': 770,
'uni03DE': 990,
'uni25DA': 9690,
'uni0303': 771,
'uni0463': 1123,
'uni0462': 1122,
'uni3018': 12312,
'uni2514': 9492,
'question': 63,
'uni25B3': 9651,
'uni24E1': 9441,
'one': 49,
'uni200A': 8202,
'uni2278': 8824,
'ring': 730,
'uni0195': 405,
'figuredash': 8210,
'uni22EC': 8940,
'uni0339': 825,
'uni0338': 824,
'uni0337': 823,
'uni0336': 822,
'uni0335': 821,
'uni0333': 819,
'uni0332': 818,
'uni0331': 817,
'uni0330': 816,
'uni01C1': 449,
'uni01C0': 448,
'uni01C3': 451,
'uni01C2': 450,
'uni2353': 9043,
'uni0308': 776,
'uni2218': 8728,
'uni2219': 8729,
'uni2216': 8726,
'uni2217': 8727,
'uni2214': 8724,
'uni0309': 777,
'uni2609': 9737,
'uni2213': 8723,
'uni2210': 8720,
'uni2211': 8721,
'uni2245': 8773,
'B': 66,
'uni25D6': 9686,
'iacute': 237,
'uni02E6': 742,
'uni02E7': 743,
'uni02E8': 744,
'uni02E9': 745,
'uni221D': 8733,
'uni221E': 8734,
'Ydieresis': 376,
'uni221C': 8732,
'uni22D7': 8919,
'uni221A': 8730,
'R': 82,
'uni24DC': 9436,
'uni033F': 831,
'uni033E': 830,
'uni033C': 828,
'uni033B': 827,
'uni033A': 826,
'b': 98,
'uni228A': 8842,
'uni22DB': 8923,
'uni2554': 9556,
'uni046B': 1131,
'uni046A': 1130,
'r': 114,
'uni24DB': 9435,
'Ccedilla': 199,
'minus': 8722,
'uni24DA': 9434,
'uni03F0': 1008,
'uni03F1': 1009,
'uni20AC': 8364,
'uni2276': 8822,
'uni24C0': 9408,
'uni0162': 354,
'uni0163': 355,
'uni011E': 286,
'uni011D': 285,
'uni011C': 284,
'uni011B': 283,
'uni0164': 356,
'uni0165': 357,
'Lslash': 321,
'uni0168': 360,
'uni0169': 361,
'uni25C9': 9673,
'uni02E5': 741,
'uni21C3': 8643,
'uni24C4': 9412,
'uni24E2': 9442,
'uni2277': 8823,
'uni013A': 314,
'uni2102': 8450,
'Uacute': 218,
'uni2317': 8983,
'uni2107': 8455,
'uni221F': 8735,
'yacute': 253,
'uni3012': 12306,
'Ucircumflex': 219,
'uni015D': 349,
'quotedbl': 34,
'uni25D9': 9689,
'uni2280': 8832,
'uni22AF': 8879,
'onehalf': 189,
'uni221B': 8731,
'Thorn': 222,
'uni2226': 8742,
'M': 77,
'uni25BA': 9658,
'uni2463': 9315,
'uni2336': 9014,
'eight': 56,
'uni2236': 8758,
'multiply': 215,
'uni210C': 8460,
'uni210A': 8458,
'uni21C9': 8649,
'grave': 96,
'uni210E': 8462,
'uni0117': 279,
'uni016C': 364,
'uni0115': 277,
'uni016A': 362,
'uni016F': 367,
'uni0112': 274,
'uni016D': 365,
'uni016E': 366,
'Ocircumflex': 212,
'uni2305': 8965,
'm': 109,
'uni24DF': 9439,
'uni0119': 281,
'uni0118': 280,
'uni20A3': 8355,
'uni20A4': 8356,
'uni20A7': 8359,
'uni2288': 8840,
'uni24C3': 9411,
'uni251C': 9500,
'uni228D': 8845,
'uni222F': 8751,
'uni222E': 8750,
'uni222D': 8749,
'uni222C': 8748,
'uni222B': 8747,
'uni222A': 8746,
'uni255B': 9563,
'Ugrave': 217,
'uni24DE': 9438,
'guilsinglright': 8250,
'uni250A': 9482,
'Ntilde': 209,
'uni0279': 633,
'questiondown': 191,
'uni256C': 9580,
'Atilde': 195,
'uni0272': 626,
'uni0273': 627,
'uni0270': 624,
'ccedilla': 231,
'uni0276': 630,
'uni0277': 631,
'uni0274': 628,
'uni0275': 629,
'uni2252': 8786,
'uni041F': 1055,
'uni2250': 8784,
'Z': 90,
'uni2256': 8790,
'uni2257': 8791,
'copyright': 169,
'uni2255': 8789,
'uni043D': 1085,
'uni043E': 1086,
'uni043F': 1087,
'yen': 165,
'uni041D': 1053,
'uni043B': 1083,
'uni043C': 1084,
'uni21B0': 8624,
'uni21B1': 8625,
'uni21B2': 8626,
'uni21B3': 8627,
'uni21B4': 8628,
'uni21B5': 8629,
'uni21B6': 8630,
'uni21B7': 8631,
'uni21B8': 8632,
'Eacute': 201,
'uni2311': 8977,
'uni2310': 8976,
'uni228F': 8847,
'uni25DB': 9691,
'uni21BA': 8634,
'uni21BB': 8635,
'uni21BC': 8636,
'uni2017': 8215,
'uni21BE': 8638,
'uni21BF': 8639,
'uni231C': 8988,
'H': 72,
'uni0293': 659,
'uni2202': 8706,
'uni22A4': 8868,
'uni231E': 8990,
'uni2232': 8754,
'uni225B': 8795,
'uni225C': 8796,
'uni24D9': 9433,
'uni225A': 8794,
'uni0438': 1080,
'uni0439': 1081,
'uni225D': 8797,
'uni225E': 8798,
'uni0434': 1076,
'X': 88,
'uni007F': 127,
'uni0437': 1079,
'Idieresis': 207,
'uni0431': 1073,
'uni0432': 1074,
'uni0433': 1075,
'uni22AC': 8876,
'uni22CD': 8909,
'uni25A3': 9635,
'bar': 124,
'uni24BB': 9403,
'uni037E': 894,
'uni027B': 635,
'h': 104,
'uni027A': 634,
'uni027F': 639,
'uni027D': 637,
'uni027E': 638,
'uni2227': 8743,
'uni2004': 8196,
'uni2225': 8741,
'uni2224': 8740,
'uni2223': 8739,
'uni2222': 8738,
'uni2221': 8737,
'uni2220': 8736,
'x': 120,
'uni2323': 8995,
'uni2559': 9561,
'uni2558': 9560,
'uni2229': 8745,
'uni2228': 8744,
'udieresis': 252,
'uni029D': 669,
'ordfeminine': 170,
'uni22CB': 8907,
'uni233D': 9021,
'uni0428': 1064,
'uni24C6': 9414,
'uni22DD': 8925,
'uni24C7': 9415,
'uni015C': 348,
'uni015B': 347,
'uni015A': 346,
'uni22AA': 8874,
'uni015F': 351,
'uni015E': 350,
'braceleft': 123,
'uni24C5': 9413,
'uni0410': 1040,
'uni03AA': 938,
'uni24C2': 9410,
'uni03AC': 940,
'uni03AB': 939,
'macron': 175,
'uni03AD': 941,
'uni03AF': 943,
'uni0294': 660,
'uni0295': 661,
'uni0296': 662,
'uni0297': 663,
'uni0290': 656,
'uni0291': 657,
'uni0292': 658,
'atilde': 227,
'Acircumflex': 194,
'uni2370': 9072,
'uni24C1': 9409,
'uni0298': 664,
'uni0299': 665,
'Oslash': 216,
'uni029E': 670,
'C': 67,
'quotedblleft': 8220,
'uni029B': 667,
'uni029C': 668,
'uni03A9': 937,
'uni03A8': 936,
'S': 83,
'uni24C9': 9417,
'uni03A1': 929,
'uni03A0': 928,
'exclam': 33,
'uni03A5': 933,
'uni03A4': 932,
'uni03A7': 935,
'Zcaron': 381,
'uni2133': 8499,
'uni2132': 8498,
'uni0159': 345,
'uni0158': 344,
'uni2137': 8503,
'uni2005': 8197,
'uni2135': 8501,
'uni2134': 8500,
'uni02BA': 698,
'uni2033': 8243,
'uni0151': 337,
'uni0150': 336,
'uni0157': 343,
'equal': 61,
'uni0155': 341,
'uni0154': 340,
's': 115,
'uni233F': 9023,
'eth': 240,
'uni24BE': 9406,
'uni21E9': 8681,
'uni2060': 8288,
'Egrave': 200,
'uni255D': 9565,
'uni24CD': 9421,
'uni21E1': 8673,
'uni21B9': 8633,
'hyphen': 45,
'uni01BE': 446,
'uni01BB': 443,
'period': 46,
'igrave': 236,
'uni01BA': 442,
'uni2296': 8854,
'uni2297': 8855,
'uni2294': 8852,
'uni2295': 8853,
'colon': 58,
'uni2293': 8851,
'uni2290': 8848,
'uni2291': 8849,
'uni032D': 813,
'uni032E': 814,
'uni032F': 815,
'uni032A': 810,
'uni032B': 811,
'uni032C': 812,
'uni231D': 8989,
'Ecircumflex': 202,
'uni24D7': 9431,
'uni25DD': 9693,
'trademark': 8482,
'Aacute': 193,
'cent': 162,
'uni0445': 1093,
'uni266E': 9838,
'uni266D': 9837,
'uni266B': 9835,
'uni03C9': 969,
'uni2003': 8195,
'uni2047': 8263,
'lslash': 322,
'uni03A6': 934,
'uni2043': 8259,
'uni250C': 9484,
'uni2040': 8256,
'uni255F': 9567,
'uni24CB': 9419,
'uni0472': 1138,
'uni0446': 1094,
'uni0474': 1140,
'uni0475': 1141,
'uni2508': 9480,
'uni2660': 9824,
'uni2506': 9478,
'uni2502': 9474,
'c': 99,
'uni2500': 9472,
'N': 78,
'uni22A6': 8870,
'uni21E7': 8679,
'uni2130': 8496,
'uni2002': 8194,
'breve': 728,
'uni0442': 1090,
'Oacute': 211,
'uni229F': 8863,
'uni25C7': 9671,
'uni229D': 8861,
'uni229E': 8862,
'guillemotleft': 171,
'uni0329': 809,
'uni24E5': 9445,
'uni011F': 287,
'uni0324': 804,
'uni0325': 805,
'uni0326': 806,
'uni0327': 807,
'uni0321': 801,
'uni0322': 802,
'n': 110,
'uni2032': 8242,
'uni2269': 8809,
'uni2268': 8808,
'uni0306': 774,
'uni226B': 8811,
'uni21EA': 8682,
'uni0166': 358,
'uni203B': 8251,
'uni01B5': 437,
'idieresis': 239,
'uni02BC': 700,
'uni01B0': 432,
'braceright': 125,
'seven': 55,
'uni02BB': 699,
'uni011A': 282,
'uni29FB': 10747,
'brokenbar': 166,
'uni2036': 8246,
'uni25C0': 9664,
'uni0156': 342,
'uni22D5': 8917,
'uni0258': 600,
'ugrave': 249,
'uni22D6': 8918,
'uni22D1': 8913,
'uni2034': 8244,
'uni22D3': 8915,
'uni22D2': 8914,
'uni203C': 8252,
'uni223E': 8766,
'uni02BF': 703,
'uni22D9': 8921,
'uni22D8': 8920,
'uni25BD': 9661,
'uni25BE': 9662,
'uni25BF': 9663,
'uni041B': 1051,
'periodcentered': 183,
'uni25BC': 9660,
'uni019E': 414,
'uni019B': 411,
'uni019A': 410,
'uni2007': 8199,
'uni0391': 913,
'uni0390': 912,
'uni0393': 915,
'uni0392': 914,
'uni0395': 917,
'uni0394': 916,
'uni0397': 919,
'uni0396': 918,
'uni0399': 921,
'uni0398': 920,
'uni25C8': 9672,
'uni2468': 9320,
'sterling': 163,
'uni22EB': 8939,
'uni039C': 924,
'uni039B': 923,
'uni039E': 926,
'uni039D': 925,
'uni039F': 927,
'I': 73,
'uni03E1': 993,
'uni03E0': 992,
'uni2319': 8985,
'uni228B': 8843,
'uni25B5': 9653,
'uni25B6': 9654,
'uni22EA': 8938,
'uni24B9': 9401,
'uni044E': 1102,
'uni0199': 409,
'uni2266': 8806,
'Y': 89,
'uni22A2': 8866,
'Eth': 208,
'uni266F': 9839,
'emdash': 8212,
'uni263B': 9787,
'uni24BD': 9405,
'uni22DE': 8926,
'uni0360': 864,
'uni2557': 9559,
'uni22DF': 8927,
'uni22DA': 8922,
'uni22DC': 8924,
'uni0361': 865,
'i': 105,
'uni24BF': 9407,
'uni0362': 866,
'uni263E': 9790,
'uni028D': 653,
'uni2259': 8793,
'uni0323': 803,
'uni2265': 8805,
'daggerdbl': 8225,
'y': 121,
'uni010A': 266,
'plusminus': 177,
'less': 60,
'uni21AE': 8622,
'uni0315': 789,
'uni230B': 8971,
'uni21AF': 8623,
'uni21AA': 8618,
'uni21AC': 8620,
'uni21AB': 8619,
'uni01FB': 507,
'uni01FC': 508,
'uni223A': 8762,
'uni01FA': 506,
'uni01FF': 511,
'uni01FD': 509,
'uni01FE': 510,
'uni2567': 9575,
'uni25E0': 9696,
'uni0104': 260,
'uni0105': 261,
'uni0106': 262,
'uni0107': 263,
'uni0100': 256,
'uni0101': 257,
'uni0102': 258,
'uni0103': 259,
'uni2038': 8248,
'uni2009': 8201,
'uni2008': 8200,
'uni0108': 264,
'uni0109': 265,
'uni02A1': 673,
'uni223B': 8763,
'uni226C': 8812,
'uni25AC': 9644,
'uni24D3': 9427,
'uni21E0': 8672,
'uni21E3': 8675,
'Udieresis': 220,
'uni21E2': 8674,
'D': 68,
'uni21E5': 8677,
'uni2621': 9761,
'uni21D1': 8657,
'uni203E': 8254,
'uni22C6': 8902,
'uni21E4': 8676,
'uni010D': 269,
'uni010E': 270,
'uni010F': 271,
'five': 53,
'T': 84,
'uni010B': 267,
'uni010C': 268,
'uni2605': 9733,
'uni2663': 9827,
'uni21E6': 8678,
'uni24B6': 9398,
'uni22C1': 8897,
'oslash': 248,
'acute': 180,
'uni01F0': 496,
'd': 100,
'OE': 338,
'uni22E3': 8931,
'Igrave': 204,
'uni2308': 8968,
'uni2309': 8969,
'uni21A9': 8617,
't': 116,
'uni2313': 8979,
'uni03A3': 931,
'uni21A4': 8612,
'uni21A7': 8615,
'uni21A6': 8614,
'uni21A1': 8609,
'uni21A0': 8608,
'uni21A3': 8611,
'uni21A2': 8610,
'parenright': 41,
'uni256A': 9578,
'uni25DC': 9692,
'uni24CE': 9422,
'uni042C': 1068,
'uni24E0': 9440,
'uni042B': 1067,
'uni0409': 1033,
'uni0408': 1032,
'uni24E7': 9447,
'uni25B4': 9652,
'uni042A': 1066,
'uni228E': 8846,
'uni0401': 1025,
'adieresis': 228,
'uni0403': 1027,
'quotesingle': 39,
'uni0405': 1029,
'uni0404': 1028,
'uni0407': 1031,
'uni0406': 1030,
'uni229C': 8860,
'uni2306': 8966,
'uni2253': 8787,
'twodotenleader': 8229,
'uni2131': 8497,
'uni21DA': 8666,
'uni2234': 8756,
'uni2235': 8757,
'uni01A5': 421,
'uni2237': 8759,
'uni2230': 8752,
'uni02CC': 716,
'slash': 47,
'uni01A0': 416,
'ellipsis': 8230,
'uni2299': 8857,
'uni2238': 8760,
'numbersign': 35,
'uni21A8': 8616,
'uni223D': 8765,
'uni01AF': 431,
'uni223F': 8767,
'uni01AD': 429,
'uni01AB': 427,
'odieresis': 246,
'uni223C': 8764,
'uni227D': 8829,
'uni0280': 640,
'O': 79,
'uni227E': 8830,
'uni21A5': 8613,
'uni22D4': 8916,
'uni25D4': 9684,
'uni227F': 8831,
'uni0435': 1077,
'uni2302': 8962,
'uni2669': 9833,
'uni24E3': 9443,
'uni2720': 10016,
'uni22A8': 8872,
'uni22A9': 8873,
'uni040A': 1034,
'uni22A7': 8871,
'oe': 339,
'uni040B': 1035,
'uni040E': 1038,
'uni22A3': 8867,
'o': 111,
'uni040F': 1039,
'Edieresis': 203,
'uni25D5': 9685,
'plus': 43,
'uni044D': 1101,
'uni263C': 9788,
'uni22E6': 8934,
'uni2283': 8835,
'uni258C': 9612,
'uni219E': 8606,
'uni24E4': 9444,
'uni2136': 8502,
'dagger': 8224,
'uni24B7': 9399,
'uni219B': 8603,
'uni22E5': 8933,
'three': 51,
'uni210B': 8459,
'uni2534': 9524,
'uni24B8': 9400,
'uni230A': 8970,
'hungarumlaut': 733,
'parenleft': 40,
'uni0148': 328,
'uni0149': 329,
'uni2124': 8484,
'uni2125': 8485,
'uni2126': 8486,
'uni2127': 8487,
'uni0140': 320,
'uni2129': 8489,
'uni25C5': 9669,
'uni0143': 323,
'uni0144': 324,
'uni0145': 325,
'uni0146': 326,
'uni0147': 327,
'uni210D': 8461,
'fraction': 8260,
'uni2031': 8241,
'uni2196': 8598,
'uni2035': 8245,
'uni24E6': 9446,
'uni016B': 363,
'uni24BA': 9402,
'uni266A': 9834,
'uni0116': 278,
'uni2115': 8469,
'registered': 174,
'J': 74,
'uni25DF': 9695,
'uni25CE': 9678,
'uni273D': 10045,
'dieresis': 168,
'uni212B': 8491,
'uni0114': 276,
'uni212D': 8493,
'uni212E': 8494,
'uni212F': 8495,
'uni014A': 330,
'uni014B': 331,
'uni014C': 332,
'uni014D': 333,
'uni014E': 334,
'uni014F': 335,
'uni025E': 606,
'uni24E8': 9448,
'uni0111': 273,
'uni24E9': 9449,
'Ograve': 210,
'j': 106,
'uni2195': 8597,
'uni2194': 8596,
'uni2197': 8599,
'uni2037': 8247,
'uni2191': 8593,
'uni2190': 8592,
'uni2193': 8595,
'uni2192': 8594,
'uni29FA': 10746,
'uni2713': 10003,
'z': 122,
'uni2199': 8601,
'uni2198': 8600,
'uni2667': 9831,
'ae': 230,
'uni0448': 1096,
'semicolon': 59,
'uni2666': 9830,
'uni038F': 911,
'uni0444': 1092,
'uni0447': 1095,
'uni038E': 910,
'uni0441': 1089,
'uni038C': 908,
'uni0443': 1091,
'uni038A': 906,
'uni0250': 592,
'uni0251': 593,
'uni0252': 594,
'uni0253': 595,
'uni0254': 596,
'at': 64,
'uni0256': 598,
'uni0257': 599,
'uni0167': 359,
'uni0259': 601,
'uni228C': 8844,
'uni2662': 9826,
'uni0319': 793,
'uni0318': 792,
'uni24BC': 9404,
'uni0402': 1026,
'uni22EF': 8943,
'Iacute': 205,
'uni22ED': 8941,
'uni22EE': 8942,
'uni0311': 785,
'uni0310': 784,
'uni21E8': 8680,
'uni0312': 786,
'percent': 37,
'uni0317': 791,
'uni0316': 790,
'uni21D6': 8662,
'uni21D7': 8663,
'uni21D4': 8660,
'uni21D5': 8661,
'uni21D2': 8658,
'uni21D3': 8659,
'uni21D0': 8656,
'uni2138': 8504,
'uni2270': 8816,
'uni2271': 8817,
'uni2272': 8818,
'uni2273': 8819,
'uni2274': 8820,
'uni2275': 8821,
'bracketright': 93,
'uni21D9': 8665,
'uni21DF': 8671,
'uni21DD': 8669,
'uni21DE': 8670,
'AE': 198,
'uni03AE': 942,
'uni227A': 8826,
'uni227B': 8827,
'uni227C': 8828,
'asterisk': 42,
'aacute': 225,
'uni226F': 8815,
'uni22E2': 8930,
'uni0386': 902,
'uni22E0': 8928,
'uni22E1': 8929,
'U': 85,
'uni22E7': 8935,
'uni22E4': 8932,
'uni0387': 903,
'uni031A': 794,
'eacute': 233,
'uni22E8': 8936,
'uni22E9': 8937,
'uni24D8': 9432,
'uni025A': 602,
'uni025B': 603,
'uni025C': 604,
'e': 101,
'uni0128': 296,
'uni025F': 607,
'uni2665': 9829,
'thorn': 254,
'uni0129': 297,
'uni253C': 9532,
'uni25D7': 9687,
'u': 117,
'uni0388': 904,
'uni0389': 905,
'uni0255': 597,
'uni0171': 369,
'uni0384': 900,
'uni0385': 901,
'uni044A': 1098,
'uni252C': 9516,
'uni044C': 1100,
'uni044B': 1099}
uni2type1 = dict([(v,k) for k,v in type12uni.items()])
tex2uni = {
'widehat': 0x0302,
'widetilde': 0x0303,
'langle': 0x27e8,
'rangle': 0x27e9,
'perp': 0x27c2,
'neq': 0x2260,
'Join': 0x2a1d,
'leqslant': 0x2a7d,
'geqslant': 0x2a7e,
'lessapprox': 0x2a85,
'gtrapprox': 0x2a86,
'lesseqqgtr': 0x2a8b,
'gtreqqless': 0x2a8c,
'triangleeq': 0x225c,
'eqslantless': 0x2a95,
'eqslantgtr': 0x2a96,
'backepsilon': 0x03f6,
'precapprox': 0x2ab7,
'succapprox': 0x2ab8,
'fallingdotseq': 0x2252,
'subseteqq': 0x2ac5,
'supseteqq': 0x2ac6,
'varpropto': 0x221d,
'precnapprox': 0x2ab9,
'succnapprox': 0x2aba,
'subsetneqq': 0x2acb,
'supsetneqq': 0x2acc,
'lnapprox': 0x2ab9,
'gnapprox': 0x2aba,
'longleftarrow': 0x27f5,
'longrightarrow': 0x27f6,
'longleftrightarrow': 0x27f7,
'Longleftarrow': 0x27f8,
'Longrightarrow': 0x27f9,
'Longleftrightarrow': 0x27fa,
'longmapsto': 0x27fc,
'leadsto': 0x21dd,
'dashleftarrow': 0x290e,
'dashrightarrow': 0x290f,
'circlearrowleft': 0x21ba,
'circlearrowright': 0x21bb,
'leftrightsquigarrow': 0x21ad,
'leftsquigarrow': 0x219c,
'rightsquigarrow': 0x219d,
'Game': 0x2141,
'hbar': 0x0127,
'hslash': 0x210f,
'ldots': 0x22ef,
'vdots': 0x22ee,
'doteqdot': 0x2251,
'doteq': 8784,
'partial': 8706,
'gg': 8811,
'asymp': 8781,
'blacktriangledown': 9662,
'otimes': 8855,
'nearrow': 8599,
'varpi': 982,
'vee': 8744,
'vec': 8407,
'smile': 8995,
'succnsim': 8937,
'gimel': 8503,
'vert': 124,
'|': 124,
'varrho': 1009,
'P': 182,
'approxident': 8779,
'Swarrow': 8665,
'textasciicircum': 94,
'imageof': 8887,
'ntriangleleft': 8938,
'nleq': 8816,
'div': 247,
'nparallel': 8742,
'Leftarrow': 8656,
'lll': 8920,
'oiint': 8751,
'ngeq': 8817,
'Theta': 920,
'origof': 8886,
'blacksquare': 9632,
'solbar': 9023,
'neg': 172,
'sum': 8721,
'Vdash': 8873,
'coloneq': 8788,
'degree': 176,
'bowtie': 8904,
'blacktriangleright': 9654,
'varsigma': 962,
'leq': 8804,
'ggg': 8921,
'lneqq': 8808,
'scurel': 8881,
'stareq': 8795,
'BbbN': 8469,
'nLeftarrow': 8653,
'nLeftrightarrow': 8654,
'k': 808,
'bot': 8869,
'BbbC': 8450,
'Lsh': 8624,
'leftleftarrows': 8647,
'BbbZ': 8484,
'digamma': 989,
'BbbR': 8477,
'BbbP': 8473,
'BbbQ': 8474,
'vartriangleright': 8883,
'succsim': 8831,
'wedge': 8743,
'lessgtr': 8822,
'veebar': 8891,
'mapsdown': 8615,
'Rsh': 8625,
'chi': 967,
'prec': 8826,
'nsubseteq': 8840,
'therefore': 8756,
'eqcirc': 8790,
'textexclamdown': 161,
'nRightarrow': 8655,
'flat': 9837,
'notin': 8713,
'llcorner': 8990,
'varepsilon': 949,
'bigtriangleup': 9651,
'aleph': 8501,
'dotminus': 8760,
'upsilon': 965,
'Lambda': 923,
'cap': 8745,
'barleftarrow': 8676,
'mu': 956,
'boxplus': 8862,
'mp': 8723,
'circledast': 8859,
'tau': 964,
'in': 8712,
'backslash': 92,
'varnothing': 8709,
'sharp': 9839,
'eqsim': 8770,
'gnsim': 8935,
'Searrow': 8664,
'updownarrows': 8645,
'heartsuit': 9825,
'trianglelefteq': 8884,
'ddag': 8225,
'sqsubseteq': 8849,
'mapsfrom': 8612,
'boxbar': 9707,
'sim': 8764,
'Nwarrow': 8662,
'nequiv': 8802,
'succ': 8827,
'vdash': 8866,
'Leftrightarrow': 8660,
'parallel': 8741,
'invnot': 8976,
'natural': 9838,
'ss': 223,
'uparrow': 8593,
'nsim': 8769,
'hookrightarrow': 8618,
'Equiv': 8803,
'approx': 8776,
'Vvdash': 8874,
'nsucc': 8833,
'leftrightharpoons': 8651,
'Re': 8476,
'boxminus': 8863,
'equiv': 8801,
'Lleftarrow': 8666,
'thinspace': 8201,
'll': 8810,
'Cup': 8915,
'measeq': 8798,
'upharpoonleft': 8639,
'lq': 8216,
'Upsilon': 933,
'subsetneq': 8842,
'greater': 62,
'supsetneq': 8843,
'Cap': 8914,
'L': 321,
'spadesuit': 9824,
'lrcorner': 8991,
'not': 824,
'bar': 772,
'rightharpoonaccent': 8401,
'boxdot': 8865,
'l': 322,
'leftharpoondown': 8637,
'bigcup': 8899,
'iint': 8748,
'bigwedge': 8896,
'downharpoonleft': 8643,
'textasciitilde': 126,
'subset': 8834,
'leqq': 8806,
'mapsup': 8613,
'nvDash': 8877,
'looparrowleft': 8619,
'nless': 8814,
'rightarrowbar': 8677,
'Vert': 8214,
'downdownarrows': 8650,
'uplus': 8846,
'simeq': 8771,
'napprox': 8777,
'ast': 8727,
'twoheaduparrow': 8607,
'doublebarwedge': 8966,
'Sigma': 931,
'leftharpoonaccent': 8400,
'ntrianglelefteq': 8940,
'nexists': 8708,
'times': 215,
'measuredangle': 8737,
'bumpeq': 8783,
'carriagereturn': 8629,
'adots': 8944,
'checkmark': 10003,
'lambda': 955,
'xi': 958,
'rbrace': 125,
'rbrack': 93,
'Nearrow': 8663,
'maltese': 10016,
'clubsuit': 9827,
'top': 8868,
'overarc': 785,
'varphi': 966,
'Delta': 916,
'iota': 953,
'nleftarrow': 8602,
'candra': 784,
'supset': 8835,
'triangleleft': 9665,
'gtreqless': 8923,
'ntrianglerighteq': 8941,
'quad': 8195,
'Xi': 926,
'gtrdot': 8919,
'leftthreetimes': 8907,
'minus': 8722,
'preccurlyeq': 8828,
'nleftrightarrow': 8622,
'lambdabar': 411,
'blacktriangle': 9652,
'kernelcontraction': 8763,
'Phi': 934,
'angle': 8736,
'spadesuitopen': 9828,
'eqless': 8924,
'mid': 8739,
'varkappa': 1008,
'Ldsh': 8626,
'updownarrow': 8597,
'beta': 946,
'textquotedblleft': 8220,
'rho': 961,
'alpha': 945,
'intercal': 8890,
'beth': 8502,
'grave': 768,
'acwopencirclearrow': 8634,
'nmid': 8740,
'nsupset': 8837,
'sigma': 963,
'dot': 775,
'Rightarrow': 8658,
'turnednot': 8985,
'backsimeq': 8909,
'leftarrowtail': 8610,
'approxeq': 8778,
'curlyeqsucc': 8927,
'rightarrowtail': 8611,
'Psi': 936,
'copyright': 169,
'yen': 165,
'vartriangleleft': 8882,
'rasp': 700,
'triangleright': 9655,
'precsim': 8830,
'infty': 8734,
'geq': 8805,
'updownarrowbar': 8616,
'precnsim': 8936,
'H': 779,
'ulcorner': 8988,
'looparrowright': 8620,
'ncong': 8775,
'downarrow': 8595,
'circeq': 8791,
'subseteq': 8838,
'bigstar': 9733,
'prime': 8242,
'lceil': 8968,
'Rrightarrow': 8667,
'oiiint': 8752,
'curlywedge': 8911,
'vDash': 8872,
'lfloor': 8970,
'ddots': 8945,
'exists': 8707,
'underbar': 817,
'Pi': 928,
'leftrightarrows': 8646,
'sphericalangle': 8738,
'coprod': 8720,
'circledcirc': 8858,
'gtrsim': 8819,
'gneqq': 8809,
'between': 8812,
'theta': 952,
'complement': 8705,
'arceq': 8792,
'nVdash': 8878,
'S': 167,
'wr': 8768,
'wp': 8472,
'backcong': 8780,
'lasp': 701,
'c': 807,
'nabla': 8711,
'dotplus': 8724,
'eta': 951,
'forall': 8704,
'eth': 240,
'colon': 58,
'sqcup': 8852,
'rightrightarrows': 8649,
'sqsupset': 8848,
'mapsto': 8614,
'bigtriangledown': 9661,
'sqsupseteq': 8850,
'propto': 8733,
'pi': 960,
'pm': 177,
'dots': 8230,
'nrightarrow': 8603,
'textasciiacute': 180,
'Doteq': 8785,
'breve': 774,
'sqcap': 8851,
'twoheadrightarrow': 8608,
'kappa': 954,
'vartriangle': 9653,
'diamondsuit': 9826,
'pitchfork': 8916,
'blacktriangleleft': 9664,
'nprec': 8832,
'vdots': 8942,
'curvearrowright': 8631,
'barwedge': 8892,
'multimap': 8888,
'textquestiondown': 191,
'cong': 8773,
'rtimes': 8906,
'rightzigzagarrow': 8669,
'rightarrow': 8594,
'leftarrow': 8592,
'__sqrt__': 8730,
'twoheaddownarrow': 8609,
'oint': 8750,
'bigvee': 8897,
'eqdef': 8797,
'sterling': 163,
'phi': 981,
'Updownarrow': 8661,
'backprime': 8245,
'emdash': 8212,
'Gamma': 915,
'i': 305,
'rceil': 8969,
'leftharpoonup': 8636,
'Im': 8465,
'curvearrowleft': 8630,
'wedgeq': 8793,
'fallingdotseq': 8786,
'curlyeqprec': 8926,
'questeq': 8799,
'less': 60,
'upuparrows': 8648,
'tilde': 771,
'textasciigrave': 96,
'smallsetminus': 8726,
'ell': 8467,
'cup': 8746,
'danger': 9761,
'nVDash': 8879,
'cdotp': 183,
'cdots': 8943,
'hat': 770,
'eqgtr': 8925,
'enspace': 8194,
'psi': 968,
'frown': 8994,
'acute': 769,
'downzigzagarrow': 8623,
'ntriangleright': 8939,
'cupdot': 8845,
'circleddash': 8861,
'oslash': 8856,
'mho': 8487,
'd': 803,
'sqsubset': 8847,
'cdot': 8901,
'Omega': 937,
'OE': 338,
'veeeq': 8794,
'Finv': 8498,
't': 865,
'leftrightarrow': 8596,
'swarrow': 8601,
'rightthreetimes': 8908,
'rightleftharpoons': 8652,
'lesssim': 8818,
'searrow': 8600,
'because': 8757,
'gtrless': 8823,
'star': 8902,
'nsubset': 8836,
'zeta': 950,
'dddot': 8411,
'bigcirc': 9675,
'Supset': 8913,
'circ': 8728,
'slash': 8725,
'ocirc': 778,
'prod': 8719,
'twoheadleftarrow': 8606,
'daleth': 8504,
'upharpoonright': 8638,
'odot': 8857,
'Uparrow': 8657,
'O': 216,
'hookleftarrow': 8617,
'trianglerighteq': 8885,
'nsime': 8772,
'oe': 339,
'nwarrow': 8598,
'o': 248,
'ddddot': 8412,
'downharpoonright': 8642,
'succcurlyeq': 8829,
'gamma': 947,
'scrR': 8475,
'dag': 8224,
'thickspace': 8197,
'frakZ': 8488,
'lessdot': 8918,
'triangledown': 9663,
'ltimes': 8905,
'scrB': 8492,
'endash': 8211,
'scrE': 8496,
'scrF': 8497,
'scrH': 8459,
'scrI': 8464,
'rightharpoondown': 8641,
'scrL': 8466,
'scrM': 8499,
'frakC': 8493,
'nsupseteq': 8841,
'circledR': 174,
'circledS': 9416,
'ngtr': 8815,
'bigcap': 8898,
'scre': 8495,
'Downarrow': 8659,
'scrg': 8458,
'overleftrightarrow': 8417,
'scro': 8500,
'lnsim': 8934,
'eqcolon': 8789,
'curlyvee': 8910,
'urcorner': 8989,
'lbrace': 123,
'Bumpeq': 8782,
'delta': 948,
'boxtimes': 8864,
'overleftarrow': 8406,
'prurel': 8880,
'clubsuitopen': 9831,
'cwopencirclearrow': 8635,
'geqq': 8807,
'rightleftarrows': 8644,
'ac': 8766,
'ae': 230,
'int': 8747,
'rfloor': 8971,
'risingdotseq': 8787,
'nvdash': 8876,
'diamond': 8900,
'ddot': 776,
'backsim': 8765,
'oplus': 8853,
'triangleq': 8796,
'check': 780,
'ni': 8715,
'iiint': 8749,
'ne': 8800,
'lesseqgtr': 8922,
'obar': 9021,
'supseteq': 8839,
'nu': 957,
'AA': 8491,
'AE': 198,
'models': 8871,
'ominus': 8854,
'dashv': 8867,
'omega': 969,
'rq': 8217,
'Subset': 8912,
'rightharpoonup': 8640,
'Rdsh': 8627,
'bullet': 8729,
'divideontimes': 8903,
'lbrack': 91,
'textquotedblright': 8221,
'Colon': 8759,
'%': 37,
'$': 36,
'{': 123,
'}': 125,
'_': 95,
'imath': 0x131,
'circumflexaccent' : 770,
'combiningbreve' : 774,
'combiningoverline' : 772,
'combininggraveaccent' : 768,
'combiningacuteaccent' : 769,
'combiningdiaeresis' : 776,
'combiningtilde' : 771,
'combiningrightarrowabove' : 8407,
'combiningdotabove' : 775,
'to': 8594,
'succeq': 8829,
'emptyset': 8709,
'leftparen': 40,
'rightparen': 41,
'bigoplus': 10753,
'leftangle': 10216,
'rightangle': 10217,
'leftbrace': 124,
'rightbrace': 125,
'jmath': 567,
'bigodot': 10752,
'preceq': 8828,
'biguplus': 10756,
'epsilon': 949,
'vartheta': 977,
'bigotimes': 10754
}
# Each element is a 4-tuple of the form:
# src_start, src_end, dst_font, dst_start
#
stix_virtual_fonts = {
'bb':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'rm', 0x1d538), # A-B
(0x0043, 0x0043, 'rm', 0x2102), # C
(0x0044, 0x0047, 'rm', 0x1d53b), # D-G
(0x0048, 0x0048, 'rm', 0x210d), # H
(0x0049, 0x004d, 'rm', 0x1d540), # I-M
(0x004e, 0x004e, 'rm', 0x2115), # N
(0x004f, 0x004f, 'rm', 0x1d546), # O
(0x0050, 0x0051, 'rm', 0x2119), # P-Q
(0x0052, 0x0052, 'rm', 0x211d), # R
(0x0053, 0x0059, 'rm', 0x1d54a), # S-Y
(0x005a, 0x005a, 'rm', 0x2124), # Z
(0x0061, 0x007a, 'rm', 0x1d552), # a-z
(0x0393, 0x0393, 'rm', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'rm', 0x213f), # \Pi
(0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'rm', 0x213d), # \gamma
(0x03c0, 0x03c0, 'rm', 0x213c), # \pi
],
'it':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'it', 0xe154), # A-B
(0x0043, 0x0043, 'it', 0x2102), # C (missing in beta STIX fonts)
(0x0044, 0x0044, 'it', 0x2145), # D
(0x0045, 0x0047, 'it', 0xe156), # E-G
(0x0048, 0x0048, 'it', 0x210d), # H (missing in beta STIX fonts)
(0x0049, 0x004d, 'it', 0xe159), # I-M
(0x004e, 0x004e, 'it', 0x2115), # N (missing in beta STIX fonts)
(0x004f, 0x004f, 'it', 0xe15e), # O
(0x0050, 0x0051, 'it', 0x2119), # P-Q (missing in beta STIX fonts)
(0x0052, 0x0052, 'it', 0x211d), # R (missing in beta STIX fonts)
(0x0053, 0x0059, 'it', 0xe15f), # S-Y
(0x005a, 0x005a, 'it', 0x2124), # Z (missing in beta STIX fonts)
(0x0061, 0x0063, 'it', 0xe166), # a-c
(0x0064, 0x0065, 'it', 0x2146), # d-e
(0x0066, 0x0068, 'it', 0xe169), # f-h
(0x0069, 0x006a, 'it', 0x2148), # i-j
(0x006b, 0x007a, 'it', 0xe16c), # k-z
(0x0393, 0x0393, 'it', 0x213e), # \Gamma (missing in beta STIX fonts)
(0x03a0, 0x03a0, 'it', 0x213f), # \Pi
(0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (missing in beta STIX fonts)
(0x03b3, 0x03b3, 'it', 0x213d), # \gamma (missing in beta STIX fonts)
(0x03c0, 0x03c0, 'it', 0x213c), # \pi
],
'bf':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x005a, 'bf', 0xe38a), # A-Z
(0x0061, 0x007a, 'bf', 0xe39d), # a-z
(0x0393, 0x0393, 'bf', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'bf', 0x213f), # \Pi
(0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'bf', 0x213d), # \gamma
(0x03c0, 0x03c0, 'bf', 0x213c), # \pi
],
},
'cal':
[
(0x0041, 0x005a, 'it', 0xe22d), # A-Z
],
'circled':
{
'rm':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'rm', 0x24b6), # A-Z
(0x0061, 0x007a, 'rm', 0x24d0) # a-z
],
'it':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'it', 0x24b6), # A-Z
(0x0061, 0x007a, 'it', 0x24d0) # a-z
],
'bf':
[
(0x0030, 0x0030, 'bf', 0x24ea), # 0
(0x0031, 0x0039, 'bf', 0x2460), # 1-9
(0x0041, 0x005a, 'bf', 0x24b6), # A-Z
(0x0061, 0x007a, 'bf', 0x24d0) # a-z
],
},
'frak':
{
'rm':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'it':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'bf':
[
(0x0041, 0x005a, 'bf', 0x1d56c), # A-Z
(0x0061, 0x007a, 'bf', 0x1d586), # a-z
],
},
'scr':
[
(0x0041, 0x0041, 'it', 0x1d49c), # A
(0x0042, 0x0042, 'it', 0x212c), # B
(0x0043, 0x0044, 'it', 0x1d49e), # C-D
(0x0045, 0x0046, 'it', 0x2130), # E-F
(0x0047, 0x0047, 'it', 0x1d4a2), # G
(0x0048, 0x0048, 'it', 0x210b), # H
(0x0049, 0x0049, 'it', 0x2110), # I
(0x004a, 0x004b, 'it', 0x1d4a5), # J-K
(0x004c, 0x004c, 'it', 0x2112), # L
        (0x004d, 0x004d, 'it', 0x2133), # M
(0x004e, 0x0051, 'it', 0x1d4a9), # N-Q
(0x0052, 0x0052, 'it', 0x211b), # R
(0x0053, 0x005a, 'it', 0x1d4ae), # S-Z
(0x0061, 0x0064, 'it', 0x1d4b6), # a-d
(0x0065, 0x0065, 'it', 0x212f), # e
(0x0066, 0x0066, 'it', 0x1d4bb), # f
(0x0067, 0x0067, 'it', 0x210a), # g
(0x0068, 0x006e, 'it', 0x1d4bd), # h-n
(0x006f, 0x006f, 'it', 0x2134), # o
(0x0070, 0x007a, 'it', 0x1d4c5), # p-z
],
'sf':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z
(0x0061, 0x007a, 'rm', 0x1d5ba), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega
(0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant
(0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant
(0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant
(0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant
(0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon
(0x2202, 0x2202, 'rm', 0xe17c), # partial differential
],
'it':
[
# These numerals are actually upright. We don't actually
# want italic numerals ever.
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'it', 0x1d608), # A-Z
(0x0061, 0x007a, 'it', 0x1d622), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega
(0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant
(0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant
(0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant
(0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant
(0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon
],
'bf':
[
(0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9
(0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z
(0x0061, 0x007a, 'bf', 0x1d5ee), # a-z
(0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega
(0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega
(0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant
(0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant
(0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant
(0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant
(0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant
(0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon
(0x2202, 0x2202, 'bf', 0x1d789), # partial differential
(0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla
],
},
'tt':
[
(0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9
(0x0041, 0x005a, 'rm', 0x1d670), # A-Z
(0x0061, 0x007a, 'rm', 0x1d68a) # a-z
],
}
| gpl-3.0 |
nvoron23/statsmodels | statsmodels/examples/tsa/ex_var.py | 33 | 1280 |
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
from statsmodels.tsa.api import VAR
# some example data
mdata = sm.datasets.macrodata.load().data
mdata = mdata[['realgdp','realcons','realinv']]
names = mdata.dtype.names
data = mdata.view((float,3))
use_growthrate = False #True #False
if use_growthrate:
data = 100 * 4 * np.diff(np.log(data), axis=0)
model = VAR(data, names=names)
res = model.fit(4)
nobs_all = data.shape[0]
#in-sample 1-step ahead forecasts
fc_in = np.array([np.squeeze(res.forecast(model.y[t-20:t], 1))
for t in range(nobs_all-6,nobs_all)])
print(fc_in - res.fittedvalues[-6:])
#out-of-sample 1-step ahead forecasts
fc_out = np.array([np.squeeze(VAR(data[:t]).fit(2).forecast(data[t-20:t], 1))
for t in range(nobs_all-6,nobs_all)])
print(fc_out - data[nobs_all-6:nobs_all])
print(fc_out - res.fittedvalues[-6:])
#out-of-sample h-step ahead forecasts
h = 2
fc_out = np.array([VAR(data[:t]).fit(2).forecast(data[t-20:t], h)[-1]
for t in range(nobs_all-6-h+1,nobs_all-h+1)])
print(fc_out - data[nobs_all-6:nobs_all]) #out-of-sample forecast error
print(fc_out - res.fittedvalues[-6:])
import matplotlib.pyplot as plt
res.plot_forecast(20)
#plt.show()
| bsd-3-clause |
jaaguilar/trading-with-python | lib/extra.py | 77 | 2540 | '''
Created on Apr 28, 2013
Copyright: Jev Kuznetsov
License: BSD
'''
from __future__ import print_function
import sys
import urllib
import os
import xlrd # module for excel file reading
import pandas as pd
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iteration):
print('\r', self, end='')
sys.stdout.flush()
self.update_iteration(iteration + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
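# Illustrative usage sketch (not part of the original module): a hypothetical
# helper showing how ProgressBar is meant to be driven from a loop; animate(i)
# redraws the bar in place on stdout.
def _progressbar_demo(n=100):
    pb = ProgressBar(n)        # n is the total number of iterations
    for i in range(n):
        pb.animate(i)          # prints e.g. [****      ]  40 of 100 complete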
def getSpyHoldings(dataDir):
''' get SPY holdings from the net, uses temp data storage to save xls file '''
dest = os.path.join(dataDir,"spy_holdings.xls")
if os.path.exists(dest):
print('File found, skipping download')
else:
print('saving to', dest)
urllib.urlretrieve ("https://www.spdrs.com/site-content/xls/SPY_All_Holdings.xls?fund=SPY&docname=All+Holdings&onyx_code1=1286&onyx_code2=1700",
dest) # download xls file and save it to data directory
# parse
wb = xlrd.open_workbook(dest) # open xls file, create a workbook
sh = wb.sheet_by_index(0) # select first sheet
data = {'name':[], 'symbol':[], 'weight':[],'sector':[]}
for rowNr in range(5,505): # cycle through the rows
v = sh.row_values(rowNr) # get all row values
data['name'].append(v[0])
data['symbol'].append(v[1]) # symbol is in the second column, append it to the list
data['weight'].append(float(v[2]))
data['sector'].append(v[3])
return pd.DataFrame(data)
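# Usage sketch (the directory below is only an example, not a path used by the
# original code):
#
#   holdings = getSpyHoldings(os.path.expanduser('~/tradingdata'))
#   print(holdings.head())    # columns: name, symbol, weight, sector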
| bsd-3-clause |
pprett/statsmodels | statsmodels/examples/tut_ols_ancova.py | 4 | 2413 | '''Examples OLS
Note: uncomment plt.show() to display graphs
Summary:
========
Relevant part of construction of design matrix
xg includes group numbers/labels,
x1 is continuous explanatory variable
>>> dummy = (xg[:,None] == np.unique(xg)).astype(float)
>>> X = np.c_[x1, dummy[:,1:], np.ones(nsample)]
Estimate the model
>>> res2 = sm.OLS(y, X).fit()
>>> print res2.params
[ 1.00901524 3.08466166 -2.84716135 9.94655423]
>>> print res2.bse
[ 0.07499873 0.71217506 1.16037215 0.38826843]
>>> prstd, iv_l, iv_u = wls_prediction_std(res2)
"Test hypothesis that all groups have same intercept"
>>> R = [[0, 1, 0, 0],
... [0, 0, 1, 0]]
>>> print res2.f_test(R)
<F test: F=array([[ 91.69986847]]), p=[[ 8.90826383e-17]], df_denom=46, df_num=2>
strongly rejected because differences in intercept are very large
'''
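# Worked micro-example of the dummy construction above (illustrative only):
# for xg = [0, 0, 1, 2], np.unique(xg) is [0, 1, 2] and the broadcast comparison
# (xg[:,None] == np.unique(xg)).astype(float) gives
#
#   dummy = [[1., 0., 0.],
#            [1., 0., 0.],
#            [0., 1., 0.],
#            [0., 0., 1.]]
#
# Dropping the first column (dummy[:,1:]) keeps group 0 as the benchmark, so the
# fitted dummy coefficients are intercept shifts relative to group 0.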
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
#fix a seed for these examples
np.random.seed(98765789)
#OLS with dummy variables, similar to ANCOVA
#-------------------------------------------
#construct simulated example:
#3 groups common slope but different intercepts
nsample = 50
x1 = np.linspace(0, 20, nsample)
sig = 1.
#suppose observations from 3 groups
xg = np.zeros(nsample, int)
xg[20:40] = 1
xg[40:] = 2
#print xg
dummy = (xg[:,None] == np.unique(xg)).astype(float)
#use group 0 as benchmark
X = np.c_[x1, dummy[:,1:], np.ones(nsample)]
beta = [1., 3, -3, 10]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
#estimate
#~~~~~~~~
res2 = sm.OLS(y, X).fit()
#print "estimated parameters: x d1-d0 d2-d0 constant"
print res2.params
#print "standard deviation of parameter estimates"
print res2.bse
prstd, iv_l, iv_u = wls_prediction_std(res2)
#print res.summary()
#plot
#~~~~
plt.figure()
plt.plot(x1, y, 'o', x1, y_true, 'b-')
plt.plot(x1, res2.fittedvalues, 'r--.')
plt.plot(x1, iv_u, 'r--')
plt.plot(x1, iv_l, 'r--')
plt.title('3 groups: different intercepts, common slope; blue: true, red: OLS')
plt.show()
#Test hypothesis that all groups have same intercept
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
R = [[0, 1, 0, 0],
[0, 0, 1, 0]]
# F test joint hypothesis R * beta = 0
# i.e. coefficient on both dummy variables equal zero
print "Test hypothesis that all groups have same intercept"
print res2.f_test(R)
| bsd-3-clause |
SuFizz/Dealer-Loves-Code | SocialMediaProminence.py | 1 | 5539 | from __future__ import division
import urllib
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
facebook_weight = 0.4; #Because of the relatively large number of users on FaceBook for each company
twitter_weight = 0.3;
googleplus_weight = 0.3;
def popularity_among_customers(fk,eb,cr,am):
Players = ('Flipkart', 'EBay', 'Croma', 'Amazon');
y_pos = np.arange(len(Players))
scores = [];
scores.append(fk);
scores.append(eb);
scores.append(cr);
scores.append(am);
print scores
People_Likability = np.array(scores);
error = np.zeros(len(Players));
plt.barh(y_pos, People_Likability, xerr=error, align='center', alpha=0.4)
plt.yticks(y_pos, Players)
plt.xlabel('Players')
plt.title('People Likability')
plt.show();
def like_extract(url):
proxy = 'http://10.93.0.37:3333';
fb = urllib.urlopen(url,proxies={'https':proxy}) #Can sometimes have problems due to the lack of https authentication in urllib and urlopen
fb_lines = fb.readlines();
ind = fb_lines[60].index("stats fwb\">") + len("stats fwb\">");
indend = fb_lines[60].index("<",ind);
t = fb_lines[60][ind:indend];
if "m" in t:
        t = float(t[:-1])*1e6;  # the "m" suffix means millions
return t;
def plusones_extract(url):
proxy = 'http://10.93.0.37:3333';
gp = urllib.urlopen(url,proxies={'https':proxy}) #Can sometimes have problems due to the lack of https authentication in urllib and urlopen
gp_lines = gp.readlines();
ind = gp_lines[62].index("<span class=\"Myb j0c\"") + len("<span class=\"Myb j0c\"");
indend = gp_lines[62].index("<",ind);
t = gp_lines[62][ind:indend];
return float(t);
def follow_extract(url):
proxy = 'http://10.93.0.37:3333';
tw = urllib.urlopen(url,proxies={'https':proxy}) #Can sometimes have problems due to the lack of https authentication in urllib and urlopen
    tw_lines = tw.readlines();
for l in tw_lines:
if("Followers" in l):
ind = l.index("<strong>") + len("<strong>");
indend = l.index("</strong>");
t = l[ind:indend];
            t = int("".join(t.split(",")))
break;
return t;
if __name__ == "__main__":
fk_url_fb="https://www.facebook.com/flipkart";
fk_url_gp = "https://plus.google.com/u/0/+flipkart/posts";
fk_url_tw = "https://twitter.com/Flipkart";
eb_url_fb="https://www.facebook.com/ebay";
eb_url_gp = "https://plus.google.com/u/0/+eBay/posts";
eb_url_tw = "https://twitter.com/ebay";
cr_url_fb="https://www.facebook.com/CromaRetail";
cr_url_gp = "https://plus.google.com/116744904644384595704/posts";
cr_url_tw = "https://twitter.com/cromaretail";
am_url_fb="https://www.facebook.com/amazon";
am_url_gp = "https://plus.google.com/u/0/+amazon/posts";
am_url_tw = "https://twitter.com/Amazon";
Flipkart_facebook_likes = like_extract(fk_url_fb); #2357920
Flipkart_twitter_followers = follow_extract(fk_url_tw); #99485
Flipkart_googleplus_ones = plusones_extract(fk_url_gp); #282934
Ebay_facebook_likes = like_extract(eb_url_fb); #7009797
Ebay_twitter_followers = follow_extract(eb_url_tw); #297589
Ebay_googleplus_ones = plusones_extract(eb_url_gp); #439796
Croma_facebook_likes = like_extract(cr_url_fb); #89303
Croma_twitter_followers = follow_extract(cr_url_tw); #10140
Croma_googleplus_ones = plusones_extract(cr_url_gp); #308
Amazon_facebook_likes = like_extract(am_url_fb); #22675230
Amazon_twitter_followers = follow_extract(am_url_tw); #887350
Amazon_googleplus_ones = plusones_extract(am_url_gp); #433602
fk = facebook_weight*(Flipkart_facebook_likes)/(Flipkart_facebook_likes+Ebay_facebook_likes+Croma_facebook_likes+Amazon_facebook_likes) +twitter_weight*(Flipkart_twitter_followers)/(Flipkart_twitter_followers+Ebay_twitter_followers+Croma_twitter_followers+Amazon_twitter_followers) + googleplus_weight*(Flipkart_googleplus_ones)/(Flipkart_googleplus_ones+Ebay_googleplus_ones+Croma_googleplus_ones+Amazon_googleplus_ones);
eb = facebook_weight*(Ebay_facebook_likes)/(Flipkart_facebook_likes+Ebay_facebook_likes+Croma_facebook_likes+Amazon_facebook_likes) +twitter_weight*(Ebay_twitter_followers)/(Flipkart_twitter_followers+Ebay_twitter_followers+Croma_twitter_followers+Amazon_twitter_followers) + googleplus_weight*(Ebay_googleplus_ones)/(Flipkart_googleplus_ones+Ebay_googleplus_ones+Croma_googleplus_ones+Amazon_googleplus_ones);
cr = facebook_weight*(Croma_facebook_likes)/(Flipkart_facebook_likes+Ebay_facebook_likes+Croma_facebook_likes+Amazon_facebook_likes) +twitter_weight*(Croma_twitter_followers)/(Flipkart_twitter_followers+Ebay_twitter_followers+Croma_twitter_followers+Amazon_twitter_followers) + googleplus_weight*(Croma_googleplus_ones)/(Flipkart_googleplus_ones+Ebay_googleplus_ones+Croma_googleplus_ones+Amazon_googleplus_ones);
    am = facebook_weight*(Amazon_facebook_likes)/(Flipkart_facebook_likes+Ebay_facebook_likes+Croma_facebook_likes+Amazon_facebook_likes) +twitter_weight*(Amazon_twitter_followers)/(Flipkart_twitter_followers+Ebay_twitter_followers+Croma_twitter_followers+Amazon_twitter_followers) + googleplus_weight*(Amazon_googleplus_ones)/(Flipkart_googleplus_ones+Ebay_googleplus_ones+Croma_googleplus_ones+Amazon_googleplus_ones);
popularity_among_customers(fk,eb,cr,am);
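    # Hypothetical refactoring sketch (not part of the original script): the four
    # scores above are the same weighted sum of per-network shares, which a single
    # helper could compute, e.g.
    #
    #   def prominence(fb, tw, gp, fb_total, tw_total, gp_total):
    #       return (facebook_weight * fb / fb_total +
    #               twitter_weight * tw / tw_total +
    #               googleplus_weight * gp / gp_total)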
| apache-2.0 |
ursk/sparco | sparco/parallel_sparsify.py | 1 | 26459 | """
Sparsify a data stream with a given basis and save the result to file.
Process the data in overlapping chunks, then tie the chunks together at the end
to generate one coefficient file. A spike-times file can be generated from that file.
"""
import os, sys
import numpy as np
# import matplotlib.pyplot as plt
import h5py
from time import time as now
import datetime
import ipdb
from mpi4py import MPI
home = os.path.expanduser('~')
path1 = os.path.join(home, 'Dropbox/nersc/csc/spikes/qn') # Cerberus
path2 = os.path.join(home, 'csc/spikes/qn') # Hopper
if not path1 in sys.path: sys.path.append(path1)
if not path2 in sys.path: sys.path.append(path2)
print "pathes", path1, path2
import sparseqn
from sparseqn import sparseqn_batch
import sptools
import mp
# mpi info
rank = MPI.COMM_WORLD.Get_rank()
procs = MPI.COMM_WORLD.Get_size()
root = 0
mpi = (rank, procs, root)
class Logger(object):
"""
Redirect stdout and stderr to a file and optionally echo to original stdout.
"""
def __init__(self, stream, logFile, rank, echo=False):
self.out = stream
self.logFile = logFile
self.echo = echo
self.rank = rank
def write(self, s):
if len(s) == 1: return
self.logFile.write('[%d] %s\n' % (self.rank, s))
if self.echo:
self.out.write('[%d] %s\n' % (self.rank, s))
@staticmethod
def start_logger(path, rank, echo, prefix):
logFile = open(os.path.join(path, '%s.log' % prefix) , 'w+', 0)
sys.stdout = Logger(sys.stdout, logFile, rank, echo)
sys.stderr = Logger(sys.stderr, logFile, rank, echo)
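# Usage sketch for Logger.start_logger (the directory is illustrative): each MPI
# rank appends '[rank] ...' lines to <path>/<prefix>.log and can echo them to the
# original stdout, e.g.
#
#   Logger.start_logger('/tmp/logs', rank, echo=True, prefix='sparsify-run')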
def block_copy(inf, outf, in0=0, out0=0, size=None, bsize=10000):
"""
Copy hdf5 dataset of size in bsize blocks
inf : input dataset
outf : output dataset
in0 : input offset
out0 : output offset
size : amount to copy
bsize : block size to use in copying
"""
if size is None: size = len(inf)
for i in range(0, size, bsize):
j = min(i+bsize, size)
# print 'Copying %d:%d to %d:%d' % (in0+i, in0+j, out0+i, out0+j)
outf[out0+i:out0+j] = inf[in0+i:in0+j][:]
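# Usage sketch for block_copy (file and dataset names are illustrative): stream
# one HDF5 dataset into another without holding it all in memory, e.g.
#
#   src = h5py.File('coeffs_in.h5', 'r')['data']
#   dsth5 = h5py.File('coeffs_out.h5', 'w')
#   dst = dsth5.create_dataset('data', shape=src.shape, dtype=src.dtype)
#   block_copy(src, dst, bsize=50000)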
class ParallelSparsify(object):
def __init__(self, recording, session,
basisf, reorder=True, subset=None,
method='mp', lam=1., positive=True,
T=1000, s=0.005, dtype='high', postfix='',
subsample=1, channels=None, maxit=25,
maxdata=None, debug=True, debug_root=None):
"""
recording : eg. 'tiger/p6'
session : eg. 26
basisf : filename containing hdf5 dataset phi
reorder : whether to reorder basis from information in basisf
subset : use a subset of basis to sparsify
method : eg. 'mp', 'owlbfgs'
        lam : sparsity penalty wherever L1 regularization is used
positive : coefficients kept non-negative if True
T : block size of sparsification
s : sparsity for mp (.1 means 10% of coefficients are on)
dtype : eg. 'high', 'down', 'low'
channels : use this subset of channels
maxdata : for debugging, only sparsify this much of the data
debug : debug output
debug_root : directory where debug plots are put
"""
self.recording = recording
self.session = session
self.basisf = basisf
self.reorder = reorder
self.subset = subset
self.method = method
self.lam = lam
self.positive = positive
self.T = T
self.s = s
self.maxit = maxit
self.dtype = dtype
self.postfix = postfix
self.subsample = subsample
self.channels = channels
self.maxdata = maxdata
self.debug = debug
self.debug_root = debug_root
self.mu = None
self.sigma = None
#self.rootdir = os.path.join(os.path.expanduser('~'), 'sn', 'py')
#self.debug_root = os.path.join(self.rootdir, 'daq', 'data/%s/%04d/out' % (recording, session))
#self.dataroot = os.path.join(self.rootdir, 'daq', 'data/%s/%04d' % (recording, session))
self.rootdir = os.path.join(os.path.expanduser('~'), 'Dropbox_outsource', 'nersc')
self.dataroot = os.path.join(self.rootdir, 'data')
self._load_data()
self._load_basis()
def _load_data(self):
"""
Load electrophysiology data
"""
if self.dtype == 'high':
filen = 'all.%04d_micro.high-butter%s.h5' % (self.session, self.postfix)
elif self.dtype == 'low':
filen = 'all.%04d_micro_down.low-butter%s.h5' % (self.session, self.postfix)
elif self.dtype == 'down':
filen = 'all.%04d_micro_down%s.h5' % (self.session, self.postfix)
elif self.dtype == 'climate':
filen = 'gautam_testdata0.h5'
elif self.dtype == 'ecog':
filen = 'xxxxxxxxxxxxx.h5'
self.filen = os.path.join(self.dataroot, filen)
print "opening file", self.filen
self.inh5 = h5py.File(self.filen, 'r')
self.data = self.inh5['data']
if self.channels is None:
self.channels = np.arange(self.data.shape[1])
if self.dtype == 'climate':
self.mean=np.array((0, ))
self.var=np.array((1, ))
self.std=np.array((1, ))
else:
self.mean = self.inh5['mean'][:][self.channels]
self.var = self.inh5['var'][:][self.channels]
self.std = np.sqrt(self.var - self.mean**2)
self.D = len(self.data) / self.subsample
if self.maxdata is not None:
self.D = self.maxdata
if rank == root:
print '[%d] Dataset has %d channels and %d timepoints' % (rank, len(self.channels), self.D)
def _load_basis(self, meanf=None, tighten=1):
"""
Load basis and reorder. Optionally used a subset.
'meanf', 'tighten' are parameters for the modified matching pursuit methods
"""
print "trying to load basis", self.basisf
basish5 = h5py.File(self.basisf, 'r')
self.phi = basish5['phi'][:]
self.C, self.N, self.P = self.phi.shape
if self.C != len(self.channels):
raise ValueError('Mismatch between data and basis')
self.C, self.N, self.P = self.phi.shape
if self.reorder:
try:
order = basish5['order'][:]
self.phi = self.phi[:,order].copy()
except:
print 'No order attribute for basis. Not reordering'
if self.subset is not None:
self.N = len(self.subset)
self.phi = self.phi[:,self.subset].copy()
if meanf is not None:
muh5 = h5py.File(meanf, 'r')
self.mu = muh5['mu'][:]
self.sigma = muh5['sigma'][:] / tighten
muh5.close()
if self.reorder:
self.mu = self.mu[order].copy()
self.sigma = self.sigma[order].copy()
# remove bfs with mean zero
nz = np.nonzero(self.mu)[0]
self.N = len(nz)
self.phi = self.phi[:,nz].copy()
self.mu = self.mu[nz].copy()
self.sigma = self.sigma[nz].copy()
print 'Left with %d basis functions' % len(nz)
basish5.close()
def _create_output(self, filen, T):
"""
Initialize coefficient output file. Use compression.
"""
try:
self.outh5 = h5py.File(filen, 'w')
except:
os.unlink(filen)
self.outh5 = h5py.File(filen, 'w')
self.out = self.outh5.create_dataset('data', shape=(T, self.N),
dtype=np.float32, compression='gzip',
chunks=(min(T,1000), self.N))
def open_output(self, filen):
"""
Open coefficient output file (don't truncate).
"""
self.outh5 = h5py.File(filen, 'r')
self.out = self.outh5['data']
def close(self):
"""
Close open files
"""
self.inh5.close()
self.outh5.close()
def get_data(self, t0, t1):
"""
Return data as channels, time
"""
t0 *= self.subsample
t1 *= self.subsample
return self.data[t0:t1:self.subsample,self.channels].astype(np.double).T
def sparsify(self):
"""
Split data into blocks, sparsify in parallel, and stitch
together by performing an inference.
"""
# split into blocks
bsize = self.D / procs
bounds = np.zeros((procs, 2), dtype=np.uint32)
b = 0
for i in range(procs):
bounds[i] = [b, b+bsize]
b += bsize
bounds[-1,1] = self.D
if self.debug and rank == root:
print ' block bounds: ', bounds
# initialize output file
fname = '%04d_micro_%s_coefficients%s' % (self.session, self.dtype, self.postfix)
fileroot = os.path.join(self.dataroot, fname)
filen = fileroot + '_proc%02d.h5' % rank
t0 = bounds[rank,0]
t1 = bounds[rank,1]
pad = self.P - 1
# create output file for block
self._create_output(filen, T=t1 - t0 + pad)
# run sparse coder
self._run(t0=t0, t1=t1)
# synchronize threads
MPI.COMM_WORLD.Barrier()
if rank == root:
# stitch together
self._stitch(bounds, fileroot)
def _run(self, t0, t1):
"""
Sparsify data set
t0, t1 : start and end times of block to sparsify
[TODO] avoid re-reading pad of data
[TODO] avoid transposing of data
[TODO] avoid zero-ing out of xv
"""
# if s is fractional and using mp, set to number of coefficients
if self.s < 1.:
self.s = int((self.P+self.T-1)*self.N * self.s)
# allocate memory for sparsification
# (sparsification routines use batches, so here we use 1 sample batches)
x = np.zeros((1, self.C, self.T))
xv = x[0] # view of x
a = np.zeros((1, self.T+self.P-1, self.N))
av = a[0] # view of a
pad = self.P - 1
# masking for mp uses inf to keep coefficient unchanged
# whereas for the quasinewton methods, 0 indicates no change,
# 1 indicate change
if 'mp' in self.method:
mask = np.zeros_like(av)
else:
mask = np.ones_like(av)
mask_col = 0
t = ot = 0
D = t1 - t0
finished = False
frame = expired = 0
if self.method == 'mp':
pursuit = mp.ConvolutionalMatchingPursuit(self.phi, s=self.s, T=self.T,
positive=self.positive, debug=self.debug>1)
if self.method == 'penalized-mp':
print 'using penalized mp'
extra = {'mu': self.mu, 'sigma': self.sigma, 'dt': 16}
pursuit = mp.PenalizedMP(self.phi, s=self.s, T=self.T,
positive=self.positive, debug=self.debug>1, extra=extra)
if self.method == 'refractory-mp':
print 'using refractory mp'
extra = {'dt': 16}
pursuit = mp.RefractoryMP(self.phi, s=self.s, T=self.T,
positive=self.positive, debug=self.debug>1, extra=extra)
tic = now()
while not finished:
if t+self.T > D:
self.T = D - t # [TODO] shouldn't change value of T
finished = True
xv[:,:self.T] = self.get_data(t0+t, t0+t+self.T)
xv[:,self.T:] = 0
xv -= self.mean[:,None]
xv /= self.std[:,None]
if 'mp' in self.method:
pursuit.run(x, A=a.transpose((0,2,1)), mask=mask.T)
elif self.method == 'owlbfgs':
sparsity_gain = 4
A = sparseqn_batch(self.phi, x, Sin=a.transpose((0,2,1)),
maxit=self.maxit, positive=self.positive,
delta=0.00001, debug=self.debug>2,
lam=sparsity_gain * self.lam,
mask=mask.T)
a[:] = A.transpose((0,2,1))
else:
raise ValueError('Bad method')
if self.debug>2:
self.debug_plot(xv, av, prefix='p%02d-%08d' % (rank, t))
if not finished:
if t == 0:
self.out[ot:ot+self.T] = av[:self.T]
# turn on masking for subsequent times
if 'mp' in self.method:
mask[:pad] = np.inf
else:
mask[:pad] = 0
mask_col = pad
ot += self.T
else:
self.out[ot:ot+self.T-pad] = av[pad:self.T]
ot += self.T-pad
if 'mp' in self.method:
av[:2*pad,:] = av[-2*pad:,:]
av[pad:].fill(0.)
else:
av[:2*pad,:] = av[-2*pad:,:]
av[2*pad:].fill(0.)
t += self.T - pad
else:
self.out[ot:ot+self.T] = av[pad:pad+self.T]
print '[%d] Completed with %d timepoints' % (rank, ot+self.T)
if ot + self.T != D + self.P - 1:
print '[%d] Warning, length of coeff data != length of data' % rank
# resizing dataset will be slow
self.out.resize((ot+self.T, self.N))
t += self.T
# print approximate time remaining
frame += 1
if frame % 10 == 0 and rank == root:
expired = now() - tic
left = str(datetime.timedelta(seconds=int((t1-t0) * expired / t - expired)))
print '[%d] %d (left: %s)' % (rank, t, left)
self.outh5.close()
def _stitch(self, bounds, fileroot):
"""
Stitch together final file
bounds : (number of blocks, start and end of blocks)
fileroot : for loading blocks and creating new file
"""
# create new file
fname = fileroot + '_merge.h5'
try:
sth5 = h5py.File(fname, 'w')
except:
os.unlink(fname)
sth5 = h5py.File(fname, 'w')
sth5.create_dataset('phi', data=self.phi)
sth5.create_dataset('mean', data=self.mean)
sth5.create_dataset('var', data=self.var)
sth5.create_dataset('std', data=self.std)
if self.mu is not None:
sth5.create_dataset('mu', data=self.mu)
sth5.create_dataset('sigma', data=self.sigma)
size = bounds[-1,1]+self.P-1
out = sth5.create_dataset('data', shape=(size, self.N),
dtype=np.float32, compression='gzip',
chunks=(min(size,1000), self.N))
# write out metadata
out.attrs['length'] = self.D
out.attrs['N'] = self.N
out.attrs['C'] = self.C
out.attrs['P'] = self.P
out.attrs['T'] = self.T
out.attrs['s'] = self.s
out.attrs['bounds'] = bounds
# open block files for reading
files = [fileroot + '_proc%02d.h5' % i for i in range(len(bounds))]
h5 = [h5py.File(f) for f in files]
d = [h['data'] for h in h5]
# allocate space
pad = self.P-1
x = np.zeros((1, self.C, 2*pad))
xv = x[0] # view of x
a = np.zeros((1, 3*pad, self.N))
av = a[0] # view of a
if 'mp' in self.method:
mask = np.zeros_like(av)
mask[:pad] = np.inf
mask[-pad:] = np.inf
s = self.s * 2*pad / self.T
else:
mask = np.ones_like(av)
mask[:pad] = 0
mask[-pad:] = 0
# method
if 'mp' in self.method:
print 'Using %g as s for stitching' % s
if self.method != 'mp': raise NotImplementedError()
pursuit = mp.ConvolutionalMatchingPursuit(self.phi, s=s, T=2*pad,
positive=self.positive, debug=self.debug>1)
print 'Stitching: '
# copy first block
t = bounds[0,1] - bounds[0,0]
block_copy(d[0], out, size=t)
print ' %d done' % t
# stitch intermediate blocks
for i in range(len(bounds)-1):
# get non-overlap coefficients
av.fill(0.)
av[:pad] = d[i][-2*pad:-pad]
av[-pad:] = d[i+1][pad:2*pad]
tb = bounds[i,1]
print 'Getting data: %d:%d' % (tb-pad, tb+pad)
xv[:] = self.get_data(tb-pad, tb+pad)
xv -= self.mean[:,None]
xv /= self.std[:,None]
A = a.transpose((0,2,1))
if 'mp' in self.method:
pursuit.run(x, A=A, mask=mask.T)
else:
A[:] = sparseqn_batch(self.phi, x, Sin=A, maxit=self.maxit, positive=self.positive,
delta=0.00001, debug=self.debug, lam=self.lam, mask=mask.T)
if self.debug:
self.debug_plot(xv, av, prefix='stitch-%08d' % t)
# write overlapping coefficients
out[t:t+pad] = av[pad:2*pad]
t += pad
# write remainder of coefficients up to pad
size = len(d[i+1]) - 2*pad
block_copy(d[i+1], out, in0=pad, out0=t, size=size)
t += size
print ' %d done' % t
# write remaining
out[t:t+pad] = d[-1][-pad:]
t += pad
print ' %d done' % t
if t != len(out):
print 'Warning: stitched dataset not correct length'
print ' stitiched %d != out %d' % (t, len(out))
print 'Finished stitching length %d dataset' % t
for h in h5:
h.close()
sth5.close()
# remove temporary files
for f in files:
try:
os.unlink(f)
except:
print 'Failed to remove temporary file: ', f
def reconstruct(self, A, volts=False):
"""
Reconstruct data from sparse coefficients for debugging
A : coefficients (time, basis)
volts : convert back to microvolts
Returns:
xhat - reconstructed data (batch, channel, time)
"""
T = len(A) - self.P + 1
xhat = np.zeros((self.C, T))
for b in range(self.P):
xhat += np.dot(self.phi[:,:,b], A[b:b+T].T)
if volts:
xhat *= self.std[:,None]
xhat += self.mean[:,None]
return xhat
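    # Note on reconstruct(): the loop above evaluates the convolutional model
    #     xhat[:, t] = sum_b phi[:, :, b] . A[t + b, :]
    # so a single nonzero coefficient A[t0, n] places a copy of basis function n
    # (scaled by its value, and de-standardized when volts=True) starting at t0.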
def generate_spike_file(self, filen, subset=None, threshold=.0):
"""
Generate hdf5 file of spike times
[TODO] I stopped using this so it hasn't been updated
"""
spikeh5 = h5py.File(filen, 'w')
d = spikeh5.create_group('data')
d.create_dataset('phi', data=self.phi)
d.attrs['length'] = self.D
d.attrs['N'] = self.N
d.attrs['C'] = self.C
d.attrs['P'] = self.P
d.attrs['T'] = self.T # [TODO] this value will be wrong
d.attrs['s'] = self.s
d.attrs['threshold'] = threshold
if self.mu is not None:
spikeh5.create_dataset('mu', data=self.mu)
spikeh5.create_dataset('sigma', data=self.sigma)
if subset is None:
subset = range(self.N)
d.attrs['subset'] = subset
for i in subset:
print 'Generating spike dataset for basis %d' % i
# load data
# should take < .5gb for 1 hour recording
# [TODO] slow because of compression and access method
tic = now()
v = self.out[:,i][:]
print ' read took %g seconds' % (now() - tic)
n = d.create_group('%d' % i)
times = np.nonzero(v)[0]
print ' %d times for basis %d' % (len(times), i)
values = v[times]
if threshold > 0.:
#th = np.max(np.abs(values)) * threshold
good = np.abs(values) > threshold
times = times[good]
values = values[good]
if len(good) == 0:
times = values = np.array([])
print ' reduced to %d times' % (len(times))
            # ipdb.set_trace()  # debugging leftover, disabled so the loop runs unattended
try:
n.create_dataset('t', data=times, dtype=np.uint32)
n.create_dataset('a', data=values, dtype=np.float32)
except:
pass
spikeh5.close()
def debug_plot(self, xv, av, prefix='dbg', figno=1):
"""
Plot data, reconstruction
"""
av = av.copy()
if not self.debug: return
self.gain = 1
self.ascale = 1.
xhatv = self.reconstruct(av)
# plot data, reconstruction, coefficients
plt.figure(figno)
plt.clf()
plt.ioff()
sparsity = (av != 0).sum() / float(av.size)
nrm = np.linalg.norm(xv)**2
error = np.linalg.norm(xv - xhatv)**2
snr = 10 * np.log10 ( nrm / error );
plt.suptitle('sparsity = %g, error = %g, snr = %g' % (sparsity, error/nrm, snr))
subplots = 4
splt = 1
plt.subplot(subplots,1,splt); splt += 1
mx = np.max(np.abs(xv))
plt.imshow(xv, vmin=-mx, vmax=mx, origin='lower', aspect='auto')
plt.subplot(subplots,1,splt); splt += 1
plt.imshow(xhatv, origin='lower', vmin=-mx, vmax=mx, aspect='auto')
plt.xticks([]); plt.yticks([])
plt.subplot(subplots,1,splt); splt += 1
plt.imshow(av.copy().T, aspect='auto')
plt.subplot(subplots,1,splt); splt += 1
pad = self.P-1
av[pad] = 1
av[2*pad] = 1
av[-pad] = 1
av[-2*pad] = 1
plt.spy(av.copy().T, aspect='auto')
plt.xticks([]); plt.yticks([])
plt.subplots_adjust(left=0.02, bottom=0.02, right=0.98, top=0.92,
wspace=0.15, hspace=0.15)
plt.draw()
plt.ion()
path = os.path.join(self.debug_root, 'movie')
try: os.makedirs(path)
except: pass
plt.savefig(os.path.join(path, prefix + '.png'))
plt.waitforbuttonpress(timeout=10)
def gautam_climate(recording, session, basisf, channels=None, method='owlbfgs'):
p = {'recording': recording,
'session': session,
'basisf': basisf,
'lam': 0.8,
'maxit': 50,
'positive': True,
'method': method,
'T': 1000,
'dtype': 'climate',
'postfix': 'test',
'subsample': 2,
'channels': channels,
'maxdata': None,
'debug': False}
ps = ParallelSparsify(**p)
ps.sparsify()
def ecog(recording, session, basisf, channels=None, method='owlbfgs'):
p = {'recording': recording,
'session': session,
'basisf': basisf,
'lam': 0.8,
'maxit': 50,
'positive': True,
'method': method,
'T': 1000,
'dtype': 'ecog',
'postfix': 'test',
'subsample': 2,
'channels': channels,
'maxdata': None,
'debug': False}
ps = ParallelSparsify(**p)
ps.sparsify()
if __name__ == '__main__':
# start logger for MPI session
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-r', '--recording', dest='recording', help='recording session, eg. tiger/p6')
(options, args) = parser.parse_args()
recording = options.recording
home = os.path.expanduser('~')
path = os.path.join(home, 'sn/py/daq/data')
print 'Using path: ', path
#Logger.start_logger(path, rank, echo=True, prefix='sparsify-%s' % recording.replace('/','-'))
recording = 'foo'
session = 66
basisf = '/Users/urs/sn/py/spikes/out/gautamtest/basis.h5';
basisf = '/Users/urs/sn/py/spikes/out/gautam_pca_whitened_negative/basis.h5'
# data file name hardcoded in line 157, as dataroot + gautam_testdata0.h5, dataroot = '~', 'Dropbox', 'nersc', 'data' need to use Dropbox_outsource
channels = range(64)
gautam_climate(recording, session, basisf, channels)
# standard function call is
# /Users/urs/anaconda/bin/mpirun -np 4 /Users/urs/anaconda/bin/python parallel_sparsify.py
| gpl-2.0 |
Sentient07/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 110 | 3768 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |