repo_name | path | copies | size | content | license
---|---|---|---|---|---|
daniel-severo/dask-ml | dask_ml/wrappers.py | 1 | 7516 | """Meta-estimators for parallelizing scikit-learn."""
import dask.array as da
import dask.dataframe as dd
import dask.delayed
import numpy as np
import sklearn.base
class ParallelPostFit(sklearn.base.BaseEstimator):
"""Meta-estimator for parallel predict and transform.
Parameters
----------
estimator : Estimator
The underlying estimator that is fit.
Notes
-----
.. warning::
This class is not appropriate for parallel or distributed *training*
on large datasets.
This estimator does not parallelize the training step. It simply calls
the underlying estimator's ``fit`` method and copies over the
learned attributes to ``self`` afterwards.
It is helpful for situations where your training dataset is relatively
small (fits on a single machine) but you need to predict or transform
a much larger dataset. ``predict``, ``predict_proba`` and ``transform``
will be done in parallel (potentially distributed if you've connected
to a ``dask.distributed.Client``).
Note that many scikit-learn estimators already predict and transform in
parallel. This meta-estimator may still be useful in those cases when your
dataset is larger than memory, as the distributed scheduler will ensure the
data isn't all read into memory at once.
Examples
--------
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> import sklearn.datasets
>>> import dask_ml.datasets
Make a small 1,000-sample training dataset and fit normally.
>>> X, y = sklearn.datasets.make_classification(n_samples=1000,
... random_state=0)
>>> clf = ParallelPostFit(estimator=GradientBoostingClassifier())
>>> clf.fit(X, y)
ParallelPostFit(estimator=GradientBoostingClassifier(...))
>>> clf.classes_
array([0, 1])
Transform and predict return dask outputs for dask inputs.
>>> X_big, y_big = dask_ml.datasets.make_classification(n_samples=100000,
...                                                     random_state=0)
>>> clf.predict(X_big)
dask.array<predict, shape=(100000,), dtype=int64, chunksize=(1000,)>
Which can be computed in parallel.
>>> clf.predict_proba(X_big).compute()
array([[0.99141094, 0.00858906],
[0.93178389, 0.06821611],
[0.99129105, 0.00870895],
...,
[0.97996652, 0.02003348],
[0.98087444, 0.01912556],
[0.99407016, 0.00592984]])
"""
def __init__(self, estimator=None):
self.estimator = estimator
def fit(self, X, y=None, **kwargs):
"""Fit the underlying estimator.
Parameters
----------
X, y : array-like
Returns
-------
self : object
"""
result = self.estimator.fit(X, y, **kwargs)
# Copy over learned attributes
attrs = {k: v for k, v in vars(result).items() if k.endswith('_')}
for k, v in attrs.items():
setattr(self, k, v)
return self
def transform(self, X):
"""Transform block or partition-wise for dask inputs.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
If the underlying estimator does not have a ``transform`` method, then
an ``AttributeError`` is raised.
Parameters
----------
X : array-like
Returns
-------
transformed : array-like
"""
transform = self._check_method('transform')
if isinstance(X, da.Array):
return X.map_blocks(transform)
elif isinstance(X, dd._Frame):
return _apply_partitionwise(X, transform)
else:
return transform(X)
def score(self, X, y):
# TODO: re-implement some scoring functions.
return self.estimator.score(X, y)
def predict(self, X):
"""Predict for X.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
Parameters
----------
X : array-like
Returns
-------
y : array-like
"""
predict = self._check_method('predict')
if isinstance(X, da.Array):
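# predict returns one value per sample, so drop the feature axis (axis 1) from the output block shape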
return X.map_blocks(predict, dtype='int', drop_axis=1)
elif isinstance(X, dd._Frame):
return _apply_partitionwise(X, predict)
else:
return predict(X)
def predict_proba(self, X):
"""Predict for X.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
If the underlying estimator does not have a ``predict_proba``
method, then an ``AttributeError`` is raised.
Parameters
----------
X : array or dataframe
Returns
-------
y : array-like
"""
predict_proba = self._check_method('predict_proba')
if isinstance(X, da.Array):
# XXX: multiclass
return X.map_blocks(predict_proba,
dtype='float',
chunks=(X.chunks[0], len(self.classes_)))
elif isinstance(X, dd._Frame):
return _apply_partitionwise(X, predict_proba)
else:
return predict_proba(X)
def _check_method(self, method):
"""Check if self.estimator has 'method'.
Raises
------
AttributeError
"""
if not hasattr(self.estimator, method):
msg = ("The wrapped estimator '{}' does not have a "
"'{}' method.".format(self.estimator, method))
raise AttributeError(msg)
return getattr(self.estimator, method)
def _first_block(dask_object):
"""Extract the first block / partition from a dask object
"""
if isinstance(dask_object, da.Array):
if dask_object.ndim > 1 and dask_object.numblocks[-1] != 1:
raise NotImplementedError("IID estimators require that the array "
"blocked only along the first axis. "
"Rechunk your array before fitting.")
shape = (dask_object.chunks[0][0],)
if dask_object.ndim > 1:
shape = shape + (dask_object.chunks[1][0],)
return da.from_delayed(dask_object.to_delayed().flatten()[0],
shape,
dask_object.dtype)
if isinstance(dask_object, dd._Frame):
return dask_object.get_partition(0)
else:
return dask_object
def _apply_partitionwise(X, func):
"""Apply a prediction partition-wise to a dask.dataframe"""
sample = func(X._meta_nonempty)
if sample.ndim <= 1:
p = ()
else:
p = (sample.shape[1],)
if isinstance(sample, np.ndarray):
blocks = X.to_delayed()
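# Partition lengths of a dask.dataframe are not known in advance, hence np.nan for the first dimension of each delayed block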
arrays = [
da.from_delayed(dask.delayed(func)(block),
shape=(np.nan,) + p,
dtype=sample.dtype)
for block in blocks
]
return da.concatenate(arrays)
else:
return X.map_partitions(func, meta=sample)
| bsd-3-clause |
pystruct/pystruct | examples/plot_snakes_typed.py | 1 | 7289 | """
==============================================
Conditional Interactions on the Snakes Dataset
==============================================
This is a variant of plot_snakes.py where we use the NodeTypeEdgeFeatureGraphCRF
class instead of EdgeFeatureGraphCRF, although there is only one type of node.
It should therefore give exactly the same results as plot_snakes.py.
This example uses the snake dataset introduced in
Nowozin, Rother, Bagon, Sharp, Yao, Kohli: Decision Tree Fields ICCV 2011
This dataset is specifically designed to require the pairwise interaction terms
to be conditioned on the input, in other words to use non-trivial edge features.
The task is as follows: a "snake" of length ten wandered over a grid. For
each cell, it had the option to go up, down, left or right (unless it came from
there). The input consists of these decisions, while the desired output is an
annotation of the snake from 0 (head) to 9 (tail). See the plots for an
example.
As input features we use a 3x3 window around each pixel (and pad with background
where necessary). We code the five different input colors (for up, down, left, right,
background) using a one-hot encoding. This is a rather naive approach, not using any
information about the dataset (other than that it is a 2d grid).
The task cannot be solved using the simple DirectionalGridCRF - which can only
infer head and tail (which are also possible to infer just from the unary
features). If we add edge-features that contain the features of the nodes that are
connected by the edge, the CRF can solve the task.
From an inference point of view, this task is very hard. QPBO move-making is
not able to solve it alone, so we use the relaxed AD3 inference for learning.
PS: This example runs for a while (5 minutes on 12 cores, 20 minutes on one core for me).
But it does work as well as Decision Tree Fields ;)
JL Meunier - January 2017
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943
Copyright Xerox
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
# import matplotlib.pyplot as plt
from sklearn.preprocessing import label_binarize
from sklearn.metrics import confusion_matrix, accuracy_score
from pystruct.learners import OneSlackSSVM
from pystruct.datasets import load_snakes
from pystruct.utils import make_grid_edges, edge_list_to_features
#from pystruct.models import EdgeFeatureGraphCRF
from pystruct.models import NodeTypeEdgeFeatureGraphCRF
from plot_snakes import one_hot_colors, neighborhood_feature, prepare_data
def convertToSingleTypeX(X):
"""
For NodeTypeEdgeFeatureGraphCRF, X is structured differently.
But NodeTypeEdgeFeatureGraphCRF can handle graphs with a single node type. One needs to convert X to the new structure using this method.
"""
return [([nf], [e], [ef]) for (nf,e,ef) in X]
if __name__ == '__main__':
print("Please be patient. Learning will take 5-20 minutes.")
snakes = load_snakes()
X_train, Y_train = snakes['X_train'], snakes['Y_train']
X_train = [one_hot_colors(x) for x in X_train]
Y_train_flat = [y_.ravel() for y_ in Y_train]
X_train_directions, X_train_edge_features = prepare_data(X_train)
inference = 'ad3+'
# first, train on X with directions only:
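# 1 node type, 11 states (10 snake segments + background), 45 node features (3x3 window x 5 one-hot colors), 2 directional edge features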
crf = NodeTypeEdgeFeatureGraphCRF(1, [11], [45], [[2]], inference_method=inference)
ssvm = OneSlackSSVM(crf, inference_cache=50, C=.1, tol=.1, max_iter=100,
n_jobs=1)
ssvm.fit(convertToSingleTypeX(X_train_directions), Y_train_flat)
# Evaluate using confusion matrix.
# Clearly the middle of the snake is the hardest part.
X_test, Y_test = snakes['X_test'], snakes['Y_test']
X_test = [one_hot_colors(x) for x in X_test]
Y_test_flat = [y_.ravel() for y_ in Y_test]
X_test_directions, X_test_edge_features = prepare_data(X_test)
Y_pred = ssvm.predict( convertToSingleTypeX(X_test_directions) )
print("Results using only directional features for edges")
print("Test accuracy: %.3f"
% accuracy_score(np.hstack(Y_test_flat), np.hstack(Y_pred)))
print(confusion_matrix(np.hstack(Y_test_flat), np.hstack(Y_pred)))
# now, use more informative edge features:
crf = NodeTypeEdgeFeatureGraphCRF(1, [11], [45], [[180]], inference_method=inference)
ssvm = OneSlackSSVM(crf, inference_cache=50, C=.1, tol=.1,
# switch_to='ad3',
#verbose=1,
n_jobs=8)
ssvm.fit( convertToSingleTypeX(X_train_edge_features), Y_train_flat)
Y_pred2 = ssvm.predict( convertToSingleTypeX(X_test_edge_features) )
print("Results using also input features for edges")
print("Test accuracy: %.3f"
% accuracy_score(np.hstack(Y_test_flat), np.hstack(Y_pred2)))
print(confusion_matrix(np.hstack(Y_test_flat), np.hstack(Y_pred2)))
# if False:
# # plot stuff
# fig, axes = plt.subplots(2, 2)
# axes[0, 0].imshow(snakes['X_test'][0], interpolation='nearest')
# axes[0, 0].set_title('Input')
# y = Y_test[0].astype(np.int)
# bg = 2 * (y != 0) # enhance contrast
# axes[0, 1].matshow(y + bg, cmap=plt.cm.Greys)
# axes[0, 1].set_title("Ground Truth")
# axes[1, 0].matshow(Y_pred[0].reshape(y.shape) + bg, cmap=plt.cm.Greys)
# axes[1, 0].set_title("Prediction w/o edge features")
# axes[1, 1].matshow(Y_pred2[0].reshape(y.shape) + bg, cmap=plt.cm.Greys)
# axes[1, 1].set_title("Prediction with edge features")
# for a in axes.ravel():
# a.set_xticks(())
# a.set_yticks(())
# plt.show()
"""
Please be patient. Learning will take 5-20 minutes.
Results using only directional features for edges
Test accuracy: 0.847
[[2750 0 0 0 0 0 0 0 0 0 0]
[ 0 99 0 0 1 0 0 0 0 0 0]
[ 0 2 68 3 9 4 6 4 3 1 0]
[ 0 4 11 45 8 14 5 6 0 6 1]
[ 0 1 22 18 31 2 14 4 3 5 0]
[ 0 3 7 38 12 22 5 4 2 7 0]
[ 0 2 19 16 26 8 16 2 9 2 0]
[ 0 6 14 26 10 15 5 12 2 10 0]
[ 0 0 12 15 16 4 16 2 18 4 13]
[ 0 2 5 18 6 8 5 3 2 50 1]
[ 0 1 11 4 13 1 2 0 2 2 64]]
Results using also input features for edges
Test accuracy: 0.998
[[2749 0 0 0 0 0 0 0 1 0 0]
[ 0 100 0 0 0 0 0 0 0 0 0]
[ 0 0 100 0 0 0 0 0 0 0 0]
[ 0 0 0 99 0 0 0 0 0 1 0]
[ 0 0 0 0 99 0 1 0 0 0 0]
[ 0 0 0 1 0 98 0 1 0 0 0]
[ 0 0 0 0 1 0 99 0 0 0 0]
[ 0 0 0 0 0 1 0 99 0 0 0]
[ 0 0 0 0 0 0 0 0 100 0 0]
[ 0 0 0 0 0 0 0 1 0 99 0]
[ 0 0 0 0 0 0 0 0 0 0 100]]
""" | bsd-2-clause |
ldamewood/kaggle | facebook/combine.py | 1 | 1058 | # -*- coding: utf-8 -*-
import pandas as pd
from itertools import izip
import numpy as np
import glob
from facebook import FacebookCompetition
print('Loading test data')
bids = pd.read_csv(FacebookCompetition.__data__['bids'])
test = pd.read_csv(FacebookCompetition.__data__['test'])
te = pd.merge(test, bids, how='left')
del bids
files = glob.glob('data/facebook.te.*.txt.gz')
its = [iter(pd.read_table(f, header=-1, iterator=True, chunksize=2**15, compression='gzip')) for f in files]
#with open('data/facebook_softmax_20150506.csv', 'w') as out:
c = []
for i,chunks in enumerate(izip(*its)):
print(i)
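# Geometric mean of the per-file class probabilities, then renormalize each row and keep the positive-class column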
A = np.array([np.c_[chunk.values,1-chunk.values] for chunk in chunks])
A = np.exp(np.log(A).mean(axis=0))
A /= A.sum(axis=1)[:, np.newaxis]
A = A[:,0]
df = pd.DataFrame(A)
df.index = chunks[0].index
df.columns = chunks[0].columns
c.append(df)
df = pd.concat(c)
df.index = te.bidder_id
df = df.groupby(level=0).mean()
df.columns = ['prediction']
df.to_csv('data/facebook.te.20150509_1.csv', index_label='bidder_id') | mit |
canavandl/bokeh | examples/plotting/server/burtin.py | 42 | 4826 | # The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
from collections import OrderedDict
from math import log, sqrt
import numpy as np
import pandas as pd
from six.moves import cStringIO as StringIO
from bokeh.plotting import figure, show, output_server
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([
("Penicillin", "#0d3362"),
("Streptomycin", "#c64737"),
("Neomycin", "black" ),
])
gram_color = {
"positive" : "#aeaeb8",
"negative" : "#e69584",
}
df = pd.read_csv(StringIO(antibiotics),
skiprows=1,
skipinitialspace=True,
engine='python')
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
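# rad() maps sqrt(log(mic * 1E4)) linearly onto [inner_radius, outer_radius]; larger MIC values end up at smaller radii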
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
x = np.zeros(len(df))
y = np.zeros(len(df))
output_server("burtin")
p = figure(plot_width=width, plot_height=height, title="",
x_axis_type=None, y_axis_type=None,
x_range=[-420, 420], y_range=[-420, 420],
min_border=0, outline_line_color="black",
background_fill="#f0e1d2", border_fill="#f0e1d2")
p.line(x+1, y+1, alpha=0)
# annular wedges
angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle
colors = [gram_color[gram] for gram in df.gram]
p.annular_wedge(
x, y, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)
# small wedges
p.annular_wedge(x, y, inner_radius, rad(df.penicillin),
-big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,
color=drug_color['Penicillin'])
p.annular_wedge(x, y, inner_radius, rad(df.streptomycin),
-big_angle+angles+3*small_angle, -big_angle+angles+4*small_angle,
color=drug_color['Streptomycin'])
p.annular_wedge(x, y, inner_radius, rad(df.neomycin),
-big_angle+angles+1*small_angle, -big_angle+angles+2*small_angle,
color=drug_color['Neomycin'])
# circular axes and labels
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
p.circle(x, y, radius=radii, fill_color=None, line_color="white")
p.text(x[:-1], radii[:-1], [str(r) for r in labels[:-1]],
text_font_size="8pt", text_align="center", text_baseline="middle")
# radial axes
p.annular_wedge(x, y, inner_radius-10, outer_radius+10,
-big_angle+angles, -big_angle+angles, color="black")
# bacteria labels
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
p.text(xr, yr, df.bacteria, angle=label_angle,
text_font_size="9pt", text_align="center", text_baseline="middle")
# OK, these hand drawn legends are pretty clunky, will be improved in future release
p.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in gram_color.keys()],
text_font_size="7pt", text_align="left", text_baseline="middle")
p.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
color=list(drug_color.values()))
p.text([-15, -15, -15], [18, 0, -18], text=list(drug_color.keys()),
text_font_size="9pt", text_align="left", text_baseline="middle")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
show(p)
| bsd-3-clause |
zorojean/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features // 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
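# timings were collected with n_samples as the outer loop, so rows of Z correspond to samples_range; hence Z.T below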
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/skimage/viewer/canvastools/painttool.py | 23 | 6437 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
LABELS_CMAP = mcolors.ListedColormap(['white', 'red', 'dodgerblue', 'gold',
'greenyellow', 'blueviolet'])
from ...viewer.canvastools.base import CanvasToolBase
__all__ = ['PaintTool']
class PaintTool(CanvasToolBase):
"""Widget for painting on top of a plot.
Parameters
----------
manager : Viewer or PlotPlugin.
Skimage viewer or plot plugin object.
overlay_shape : shape tuple
2D shape tuple used to initialize overlay image.
alpha : float (between [0, 1])
Opacity of overlay
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
rect_props : dict
Properties for :class:`matplotlib.patches.Rectangle`. This class
redefines defaults in :class:`matplotlib.widgets.RectangleSelector`.
Attributes
----------
overlay : array
Overlay of painted labels displayed on top of image.
label : int
Current paint color.
"""
def __init__(self, manager, overlay_shape, radius=5, alpha=0.3,
on_move=None, on_release=None, on_enter=None,
rect_props=None):
super(PaintTool, self).__init__(manager, on_move=on_move,
on_enter=on_enter,
on_release=on_release)
props = dict(edgecolor='r', facecolor='0.7', alpha=0.5, animated=True)
props.update(rect_props if rect_props is not None else {})
self.alpha = alpha
self.cmap = LABELS_CMAP
self._overlay_plot = None
self.shape = overlay_shape
self._cursor = plt.Rectangle((0, 0), 0, 0, **props)
self._cursor.set_visible(False)
self.ax.add_patch(self._cursor)
# `label` and `radius` can only be set after initializing `_cursor`
self.label = 1
self.radius = radius
# Note that the order is important: Redraw cursor *after* overlay
self.artists = [self._overlay_plot, self._cursor]
self.manager.add_tool(self)
@property
def label(self):
return self._label
@label.setter
def label(self, value):
if value >= self.cmap.N:
raise ValueError('Maximum label value = %s' % (self.cmap.N - 1))
self._label = value
self._cursor.set_edgecolor(self.cmap(value))
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, r):
self._radius = r
self._width = 2 * r + 1
self._cursor.set_width(self._width)
self._cursor.set_height(self._width)
self.window = CenteredWindow(r, self._shape)
@property
def overlay(self):
return self._overlay
@overlay.setter
def overlay(self, image):
self._overlay = image
if image is None:
self.ax.images.remove(self._overlay_plot)
self._overlay_plot = None
elif self._overlay_plot is None:
props = dict(cmap=self.cmap, alpha=self.alpha,
norm=mcolors.NoNorm(), animated=True)
self._overlay_plot = self.ax.imshow(image, **props)
else:
self._overlay_plot.set_data(image)
self.redraw()
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, shape):
self._shape = shape
if self._overlay_plot is not None:
self._overlay_plot.set_extent((-0.5, shape[1] + 0.5,
shape[0] + 0.5, -0.5))
self.radius = self._radius
self.overlay = np.zeros(shape, dtype='uint8')
def on_key_press(self, event):
if event.key == 'enter':
self.callback_on_enter(self.geometry)
self.redraw()
def on_mouse_press(self, event):
if event.button != 1 or not self.ax.in_axes(event):
return
self.update_cursor(event.xdata, event.ydata)
self.update_overlay(event.xdata, event.ydata)
def on_mouse_release(self, event):
if event.button != 1:
return
self.callback_on_release(self.geometry)
def on_move(self, event):
if not self.ax.in_axes(event):
self._cursor.set_visible(False)
self.redraw() # make sure cursor is not visible
return
self._cursor.set_visible(True)
self.update_cursor(event.xdata, event.ydata)
if event.button != 1:
self.redraw() # update cursor position
return
self.update_overlay(event.xdata, event.ydata)
self.callback_on_move(self.geometry)
def update_overlay(self, x, y):
overlay = self.overlay
overlay[self.window.at(y, x)] = self.label
# Note that overlay calls `redraw`
self.overlay = overlay
def update_cursor(self, x, y):
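# The cursor rectangle is positioned by its lower-left corner, so shift the pointer position by radius + 1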
x = x - self.radius - 1
y = y - self.radius - 1
self._cursor.set_xy((x, y))
@property
def geometry(self):
return self.overlay
class CenteredWindow(object):
"""Window that create slices numpy arrays over 2D windows.
Examples
--------
>>> a = np.arange(16).reshape(4, 4)
>>> w = CenteredWindow(1, a.shape)
>>> a[w.at(1, 1)]
array([[ 0, 1, 2],
[ 4, 5, 6],
[ 8, 9, 10]])
>>> a[w.at(0, 0)]
array([[0, 1],
[4, 5]])
>>> a[w.at(4, 3)]
array([[14, 15]])
"""
def __init__(self, radius, array_shape):
self.radius = radius
self.array_shape = array_shape
def at(self, row, col):
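# Clip the centered window to the array bounds and return (row_slice, col_slice)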
h, w = self.array_shape
r = self.radius
xmin = max(0, col - r)
xmax = min(w, col + r + 1)
ymin = max(0, row - r)
ymax = min(h, row + r + 1)
return [slice(ymin, ymax), slice(xmin, xmax)]
if __name__ == '__main__': # pragma: no cover
np.testing.rundocs()
from ... import data
from ...viewer import ImageViewer
image = data.camera()
viewer = ImageViewer(image)
paint_tool = PaintTool(viewer, image.shape)
viewer.show()
| gpl-3.0 |
woodscn/scipy | doc/source/tutorial/examples/normdiscr_plot1.py | 84 | 1547 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4 #bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound #actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2, 1) #integer grid
gridlimitsnorm = (grid-0.5) / npointsh * nbound #bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
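# Build a discrete distribution whose support is the integer grid and whose pmf are the binned truncated-normal probabilities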
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
np.random.seed(87655678) #fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd=rvs
f,l = np.histogram(rvs, bins=gridlimits)
sfreq = np.vstack([gridint, f, probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.pdf(ind, scale=nd_std),
color='b')
plt.ylabel('Frequency')
plt.title('Frequency and Probability of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
SiLab-Bonn/testbeam_analysis | testbeam_analysis/dut_alignment.py | 1 | 93557 | ''' All DUT alignment functions in space and time are listed here plus additional alignment check functions'''
from __future__ import division
import logging
import re
import os
import progressbar
import warnings
from collections import Iterable
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
import tables as tb
import numpy as np
from scipy.optimize import curve_fit, minimize_scalar, leastsq, basinhopping, OptimizeWarning, minimize
from matplotlib.backends.backend_pdf import PdfPages
from testbeam_analysis.tools import analysis_utils
from testbeam_analysis.tools import plot_utils
from testbeam_analysis.tools import geometry_utils
from testbeam_analysis.tools import data_selection
# Imports for track based alignment
from testbeam_analysis.track_analysis import fit_tracks
from testbeam_analysis.result_analysis import calculate_residuals
warnings.simplefilter("ignore", OptimizeWarning) # Fit errors are handled internally, turn of warnings
def correlate_cluster(input_cluster_files, output_correlation_file, n_pixels, pixel_size=None, dut_names=None, plot=True, chunk_size=4999999):
'''"Calculates the correlation histograms from the cluster arrays.
The 2D correlation array of pairs of two different devices are created on event basis.
All permutations are considered (all clusters of the first device are correlated with all clusters of the second device).
Parameters
----------
input_cluster_files : iterable
Iterable of filenames of the cluster files.
output_correlation_file : string
Filename of the output correlation file with the correlation histograms.
n_pixels : iterable of tuples
One tuple per DUT describing the total number of pixels (column/row),
e.g. for two FE-I4 DUTs [(80, 336), (80, 336)].
pixel_size : iterable of tuples
One tuple per DUT describing the pixel dimension (column/row),
e.g. for two FE-I4 DUTs [(250, 50), (250, 50)].
If None, assuming same pixel size for all DUTs.
dut_names : iterable of strings
Names of the DUTs. If None, the DUT index will be used.
plot : bool
If True, create additional output plots.
chunk_size : uint
Chunk size of the data when reading from file.
'''
logging.info('=== Correlating the index of %d DUTs ===', len(input_cluster_files))
with tb.open_file(output_correlation_file, mode="w") as out_file_h5:
n_duts = len(input_cluster_files)
# Result arrays to be filled
column_correlations = []
row_correlations = []
for dut_index in range(1, n_duts):
shape_column = (n_pixels[dut_index][0], n_pixels[0][0])
shape_row = (n_pixels[dut_index][1], n_pixels[0][1])
column_correlations.append(np.zeros(shape_column, dtype=np.int32))
row_correlations.append(np.zeros(shape_row, dtype=np.int32))
start_indices = [None] * n_duts # Store the loop indices for speed up
with tb.open_file(input_cluster_files[0], mode='r') as in_file_h5: # Open DUT0 cluster file
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=in_file_h5.root.Cluster.shape[0], term_width=80)
progress_bar.start()
pool = Pool() # Provide worker pool
for cluster_dut_0, start_indices[0] in analysis_utils.data_aligned_at_events(in_file_h5.root.Cluster, start_index=start_indices[0], chunk_size=chunk_size): # Loop over the cluster of DUT0 in chunks
actual_event_numbers = cluster_dut_0[:]['event_number']
# Create correlation histograms to the reference device for all other devices
# Do this in parallel to safe time
dut_results = []
for dut_index, cluster_file in enumerate(input_cluster_files[1:], start=1): # Loop over the other cluster files
dut_results.append(pool.apply_async(_correlate_cluster, kwds={'cluster_dut_0': cluster_dut_0,
'cluster_file': cluster_file,
'start_index': start_indices[dut_index],
'start_event_number': actual_event_numbers[0],
'stop_event_number': actual_event_numbers[-1] + 1,
'column_correlation': column_correlations[dut_index - 1],
'row_correlation': row_correlations[dut_index - 1],
'chunk_size': chunk_size
}
))
# Collect results when available
for dut_index, dut_result in enumerate(dut_results, start=1):
(start_indices[dut_index], column_correlations[dut_index - 1], row_correlations[dut_index - 1]) = dut_result.get()
progress_bar.update(start_indices[0])
pool.close()
pool.join()
# Store the correlation histograms
for dut_index in range(n_duts - 1):
out_col = out_file_h5.create_carray(out_file_h5.root, name='CorrelationColumn_%d_0' % (dut_index + 1), title='Column Correlation between DUT%d and DUT%d' % (dut_index + 1, 0), atom=tb.Atom.from_dtype(column_correlations[dut_index].dtype), shape=column_correlations[dut_index].shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_row = out_file_h5.create_carray(out_file_h5.root, name='CorrelationRow_%d_0' % (dut_index + 1), title='Row Correlation between DUT%d and DUT%d' % (dut_index + 1, 0), atom=tb.Atom.from_dtype(row_correlations[dut_index].dtype), shape=row_correlations[dut_index].shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
out_col.attrs.filenames = [str(input_cluster_files[0]), str(input_cluster_files[dut_index])]
out_row.attrs.filenames = [str(input_cluster_files[0]), str(input_cluster_files[dut_index])]
out_col[:] = column_correlations[dut_index]
out_row[:] = row_correlations[dut_index]
progress_bar.finish()
if plot:
plot_utils.plot_correlations(input_correlation_file=output_correlation_file, pixel_size=pixel_size, dut_names=dut_names)
def merge_cluster_data(input_cluster_files, output_merged_file, n_pixels, pixel_size, chunk_size=4999999):
'''Takes the clusters from all cluster files and merges them into one big table aligned at a common event number.
Empty entries are signaled with column = row = charge = nan. Position is translated from indices to um. The
local coordinate system origin (0, 0) is defined in the sensor center, to decouple translation and rotation.
Cluster position errors are calculated from cluster dimensions.
Parameters
----------
input_cluster_files : list of pytables files
File name of the input cluster files with correlation data.
output_merged_file : pytables file
File name of the output tracklet file.
n_pixels : iterable of tuples
One tuple per DUT describing the total number of pixels (column/row),
e.g. for two FE-I4 DUTs [(80, 336), (80, 336)].
pixel_size : iterable of tuples
One tuple per DUT describing the pixel dimension (column/row),
e.g. for two FE-I4 DUTs [(250, 50), (250, 50)].
chunk_size : uint
Chunk size of the data when reading from file.
'''
logging.info('=== Merge cluster files from %d DUTs to merged hit file ===', len(input_cluster_files))
# Create result array description, depends on the number of DUTs
description = [('event_number', np.int64)]
for index, _ in enumerate(input_cluster_files):
description.append(('x_dut_%d' % index, np.float))
for index, _ in enumerate(input_cluster_files):
description.append(('y_dut_%d' % index, np.float))
for index, _ in enumerate(input_cluster_files):
description.append(('z_dut_%d' % index, np.float))
for index, _ in enumerate(input_cluster_files):
description.append(('charge_dut_%d' % index, np.float))
for index, _ in enumerate(input_cluster_files):
description.append(('n_hits_dut_%d' % index, np.int8))
description.extend([('track_quality', np.uint32), ('n_tracks', np.int8)])
for index, _ in enumerate(input_cluster_files):
description.append(('xerr_dut_%d' % index, np.float))
for index, _ in enumerate(input_cluster_files):
description.append(('yerr_dut_%d' % index, np.float))
for index, _ in enumerate(input_cluster_files):
description.append(('zerr_dut_%d' % index, np.float))
start_indices_merging_loop = [None] * len(input_cluster_files) # Store the merging loop indices for speed up
start_indices_data_loop = [None] * len(input_cluster_files) # Additional store indices for the data loop
actual_start_event_number = None # Defines the first event number of the actual chunk for speed up. Cannot be deduced from DUT0, since this DUT could have missing event numbers.
# Merge the cluster data from different DUTs into one table
with tb.open_file(output_merged_file, mode='w') as out_file_h5:
merged_cluster_table = out_file_h5.create_table(out_file_h5.root, name='MergedCluster', description=np.zeros((1,), dtype=description).dtype, title='Merged cluster on event number', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
with tb.open_file(input_cluster_files[0], mode='r') as in_file_h5: # Open DUT0 cluster file
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=in_file_h5.root.Cluster.shape[0], term_width=80)
progress_bar.start()
for actual_cluster_dut_0, start_indices_data_loop[0] in analysis_utils.data_aligned_at_events(in_file_h5.root.Cluster, start_index=start_indices_data_loop[0], start_event_number=actual_start_event_number, stop_event_number=None, chunk_size=chunk_size): # Loop over the cluster of DUT0 in chunks
actual_event_numbers = actual_cluster_dut_0[:]['event_number']
# First loop: calculate the minimum event number indices needed to merge all cluster from all files to this event number index
common_event_numbers = actual_event_numbers
for dut_index, cluster_file in enumerate(input_cluster_files[1:], start=1): # Loop over the other cluster files
with tb.open_file(cluster_file, mode='r') as actual_in_file_h5: # Open DUT0 cluster file
for actual_cluster, start_indices_merging_loop[dut_index] in analysis_utils.data_aligned_at_events(actual_in_file_h5.root.Cluster, start_index=start_indices_merging_loop[dut_index], start_event_number=actual_start_event_number, stop_event_number=actual_event_numbers[-1] + 1, chunk_size=chunk_size, fail_on_missing_events=False): # Loop over the cluster in the actual cluster file in chunks
common_event_numbers = analysis_utils.get_max_events_in_both_arrays(common_event_numbers, actual_cluster[:]['event_number'])
merged_cluster_array = np.zeros(shape=(common_event_numbers.shape[0],), dtype=description) # resulting array to be filled
for index, _ in enumerate(input_cluster_files):
# for no hit: column = row = charge = nan
merged_cluster_array['x_dut_%d' % (index)] = np.nan
merged_cluster_array['y_dut_%d' % (index)] = np.nan
merged_cluster_array['z_dut_%d' % (index)] = np.nan
merged_cluster_array['charge_dut_%d' % (index)] = np.nan
merged_cluster_array['xerr_dut_%d' % (index)] = np.nan
merged_cluster_array['yerr_dut_%d' % (index)] = np.nan
merged_cluster_array['zerr_dut_%d' % (index)] = np.nan
# Set the event number
merged_cluster_array['event_number'] = common_event_numbers[:]
# Fill result array with DUT 0 data
actual_cluster_dut_0 = analysis_utils.map_cluster(common_event_numbers, actual_cluster_dut_0)
# Select real hits, values with nan are virtual hits
selection = ~np.isnan(actual_cluster_dut_0['mean_column'])
# Convert indices to positions, origin defined in the center of the sensor
merged_cluster_array['x_dut_0'][selection] = pixel_size[0][0] * (actual_cluster_dut_0['mean_column'][selection] - 0.5 - (0.5 * n_pixels[0][0]))
merged_cluster_array['y_dut_0'][selection] = pixel_size[0][1] * (actual_cluster_dut_0['mean_row'][selection] - 0.5 - (0.5 * n_pixels[0][1]))
merged_cluster_array['z_dut_0'][selection] = 0.0
xerr = np.zeros(selection.shape)
yerr = np.zeros(selection.shape)
zerr = np.zeros(selection.shape)
xerr[selection] = actual_cluster_dut_0['err_column'][selection] * pixel_size[0][0]
yerr[selection] = actual_cluster_dut_0['err_row'][selection] * pixel_size[0][1]
merged_cluster_array['xerr_dut_0'][selection] = xerr[selection]
merged_cluster_array['yerr_dut_0'][selection] = yerr[selection]
merged_cluster_array['zerr_dut_0'][selection] = zerr[selection]
merged_cluster_array['charge_dut_0'][selection] = actual_cluster_dut_0['charge'][selection]
merged_cluster_array['n_hits_dut_0'][selection] = actual_cluster_dut_0['n_hits'][selection]
# Fill result array with other DUT data
# Second loop: get the cluster from all files and merge them to the common event number
for dut_index, cluster_file in enumerate(input_cluster_files[1:], start=1): # Loop over the other cluster files
with tb.open_file(cluster_file, mode='r') as actual_in_file_h5: # Open other DUT cluster file
for actual_cluster_dut, start_indices_data_loop[dut_index] in analysis_utils.data_aligned_at_events(actual_in_file_h5.root.Cluster, start_index=start_indices_data_loop[dut_index], start_event_number=common_event_numbers[0], stop_event_number=common_event_numbers[-1] + 1, chunk_size=chunk_size, fail_on_missing_events=False): # Loop over the cluster in the actual cluster file in chunks
actual_cluster_dut = analysis_utils.map_cluster(common_event_numbers, actual_cluster_dut)
# Select real hits, values with nan are virtual hits
selection = ~np.isnan(actual_cluster_dut['mean_column'])
# Convert indices to positions, origin in the center of the sensor, remaining DUTs
merged_cluster_array['x_dut_%d' % (dut_index)][selection] = pixel_size[dut_index][0] * (actual_cluster_dut['mean_column'][selection] - 0.5 - (0.5 * n_pixels[dut_index][0]))
merged_cluster_array['y_dut_%d' % (dut_index)][selection] = pixel_size[dut_index][1] * (actual_cluster_dut['mean_row'][selection] - 0.5 - (0.5 * n_pixels[dut_index][1]))
merged_cluster_array['z_dut_%d' % (dut_index)][selection] = 0.0
xerr = np.zeros(selection.shape)
yerr = np.zeros(selection.shape)
zerr = np.zeros(selection.shape)
xerr[selection] = actual_cluster_dut['err_column'][selection] * pixel_size[dut_index][0]
yerr[selection] = actual_cluster_dut['err_row'][selection] * pixel_size[dut_index][1]
merged_cluster_array['xerr_dut_%d' % (dut_index)][selection] = xerr[selection]
merged_cluster_array['yerr_dut_%d' % (dut_index)][selection] = yerr[selection]
merged_cluster_array['zerr_dut_%d' % (dut_index)][selection] = zerr[selection]
merged_cluster_array['charge_dut_%d' % (dut_index)][selection] = actual_cluster_dut['charge'][selection]
merged_cluster_array['n_hits_dut_%d' % (dut_index)][selection] = actual_cluster_dut['n_hits'][selection]
merged_cluster_table.append(merged_cluster_array)
actual_start_event_number = common_event_numbers[-1] + 1 # Set the starting event number for the next chunked read
progress_bar.update(start_indices_data_loop[0])
progress_bar.finish()
def prealignment(input_correlation_file, output_alignment_file, z_positions, pixel_size, s_n=0.1, fit_background=False, reduce_background=False, dut_names=None, no_fit=False, non_interactive=True, iterations=3, plot=True, gui=False, queue=False):
'''Deduce a pre-alignment from the correlations, by fitting the correlations with a straight line (gives offset, slope, but no tilt angles).
The user can define cuts on the fit error and straight line offset in an interactive way.
Parameters
----------
input_correlation_file : string
Filename of the input correlation file.
output_alignment_file : string
Filename of the output alignment file.
z_positions : iterable
The z positions of the DUTs in um.
pixel_size : iterable of tuples
One tuple per DUT describing the pixel dimension (column/row),
e.g. for two FE-I4 DUTs [(250, 50), (250, 50)].
s_n : float
The signal to noise ratio for peak signal over background peak. This should be specified when the background is fitted with a gaussian function.
Usually data with a lot of tracks per event has a Gaussian background. A good S/N value can be estimated by investigating the correlation plot.
The default value is usually fine.
fit_background : bool
Data with a lot of tracks per event has a Gaussian background from the beam profile. Also try to fit this background to determine the correlation
peak correctly. If you see a clear 2D Gaussian in the correlation plot this should be activated. If you have 1-2 tracks per event and large pixels
this option should be off, because otherwise overfitting is possible.
reduce_background : bool
Reduce background (uncorrelated events) by using SVD of the 2D correlation array.
dut_names : iterable
Names of the DUTs. If None, the DUT index will be used.
no_fit : bool
Use Hough transformation to calculate slope and offset.
non_interactive : bool
Deactivate user interaction and estimate fit range automatically.
iterations : uint
The number of iterations in non-interactive mode.
plot : bool
If True, create additional output plots.
gui : bool
If True, this function is executed from the GUI and returns figures
queue : bool, dict
If gui is True and non_interactive is False, queue is a dict with an input and an output queue to communicate with the GUI thread
'''
logging.info('=== Pre-alignment ===')
if no_fit:
if not reduce_background:
logging.warning("no_fit is True, setting reduce_background to True")
reduce_background = True
if reduce_background:
if fit_background:
logging.warning("reduce_background is True, setting fit_background to False")
fit_background = False
if plot is True and not gui:
output_pdf = PdfPages(os.path.splitext(output_alignment_file)[0] + '_prealigned.pdf', keep_empty=False)
else:
output_pdf = None
figs = [] if gui else None
with tb.open_file(input_correlation_file, mode="r") as in_file_h5:
n_duts = len(in_file_h5.list_nodes("/")) // 2 + 1 # no correlation for reference DUT0
result = np.zeros(shape=(n_duts,), dtype=[('DUT', np.uint8), ('column_c0', np.float), ('column_c0_error', np.float), ('column_c1', np.float), ('column_c1_error', np.float), ('column_sigma', np.float), ('column_sigma_error', np.float), ('row_c0', np.float), ('row_c0_error', np.float), ('row_c1', np.float), ('row_c1_error', np.float), ('row_sigma', np.float), ('row_sigma_error', np.float), ('z', np.float)])
# Set std. settings for reference DUT0
result[0]['column_c0'], result[0]['column_c0_error'] = 0.0, 0.0
result[0]['column_c1'], result[0]['column_c1_error'] = 1.0, 0.0
result[0]['row_c0'], result[0]['row_c0_error'] = 0.0, 0.0
result[0]['row_c1'], result[0]['row_c1_error'] = 1.0, 0.0
result[0]['z'] = z_positions[0]
for node in in_file_h5.root:
table_prefix = 'column' if 'column' in node.name.lower() else 'row'
indices = re.findall(r'\d+', node.name)
dut_idx = int(indices[0])
ref_idx = int(indices[1])
result[dut_idx]['DUT'] = dut_idx
dut_name = dut_names[dut_idx] if dut_names else ("DUT" + str(dut_idx))
ref_name = dut_names[ref_idx] if dut_names else ("DUT" + str(ref_idx))
logging.info('Aligning data from %s', node.name)
if "column" in node.name.lower():
pixel_size_dut, pixel_size_ref = pixel_size[dut_idx][0], pixel_size[ref_idx][0]
else:
pixel_size_dut, pixel_size_ref = pixel_size[dut_idx][1], pixel_size[ref_idx][1]
data = node[:]
n_pixel_dut, n_pixel_ref = data.shape[0], data.shape[1]
# Initialize arrays with np.nan (invalid), adding 0.5 to change from index to position
# matrix index 0 is cluster index 1 ranging from 0.5 to 1.4999, which becomes position 0.0 to 0.999 with center at 0.5, etc.
x_ref = (np.linspace(0.0, n_pixel_ref, num=n_pixel_ref, endpoint=False, dtype=np.float) + 0.5)
x_dut = (np.linspace(0.0, n_pixel_dut, num=n_pixel_dut, endpoint=False, dtype=np.float) + 0.5)
coeff_fitted = [None] * n_pixel_dut
mean_fitted = np.empty(shape=(n_pixel_dut,), dtype=np.float) # Peak of the Gauss fit
mean_fitted.fill(np.nan)
mean_error_fitted = np.empty(shape=(n_pixel_dut,), dtype=np.float) # Error of the fit of the peak
mean_error_fitted.fill(np.nan)
sigma_fitted = np.empty(shape=(n_pixel_dut,), dtype=np.float) # Sigma of the Gauss fit
sigma_fitted.fill(np.nan)
chi2 = np.empty(shape=(n_pixel_dut,), dtype=np.float) # Chi2 of the fit
chi2.fill(np.nan)
n_cluster = np.sum(data, axis=1) # Number of hits per bin
if reduce_background:
uu, dd, vv = np.linalg.svd(data) # singular value decomposition
background = np.matrix(uu[:, :1]) * np.diag(dd[:1]) * np.matrix(vv[:1, :]) # take first singular value for background
background = np.array(background, dtype=np.int32) # make Numpy array
data = (data - background).astype(np.int32) # remove background
data -= data.min() # only positive values
if no_fit:
# calculate half height
median = np.median(data)
median_max = np.median(np.max(data, axis=1))
half_median_data = (data > ((median + median_max) / 2))
# calculate maximum per column
max_select = np.argmax(data, axis=1)
hough_data = np.zeros_like(data)
hough_data[np.arange(data.shape[0]), max_select] = 1
# select maxima that are larger than half height
hough_data = hough_data & half_median_data
# transpose for correct angle
hough_data = hough_data.T
accumulator, theta, rho, theta_edges, rho_edges = analysis_utils.hough_transform(hough_data, theta_res=0.1, rho_res=1.0, return_edges=True)
rho_idx, th_idx = np.unravel_index(accumulator.argmax(), accumulator.shape)
rho_val, theta_val = rho[rho_idx], theta[th_idx]
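# The accumulator maximum defines the line x * cos(theta) + y * sin(theta) = rho; convert it to slope and offset in index space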
slope_idx, offset_idx = -np.cos(theta_val) / np.sin(theta_val), rho_val / np.sin(theta_val)
slope = slope_idx * (pixel_size_ref / pixel_size_dut)
offset = offset_idx * pixel_size_ref
# offset in the center of the pixel matrix
offset_center = offset + slope * pixel_size_dut * n_pixel_dut * 0.5 - pixel_size_ref * n_pixel_ref * 0.5
offset_center += 0.5 * pixel_size_ref - slope * 0.5 * pixel_size_dut # correct for half bin
result[dut_idx][table_prefix + '_c0'], result[dut_idx][table_prefix + '_c0_error'] = offset_center, 0.0
result[dut_idx][table_prefix + '_c1'], result[dut_idx][table_prefix + '_c1_error'] = slope, 0.0
result[dut_idx][table_prefix + '_sigma'], result[dut_idx][table_prefix + '_sigma_error'] = 0.0, 0.0
result[dut_idx]['z'] = z_positions[dut_idx]
plot_utils.plot_hough(x=x_dut,
data=hough_data,
accumulator=accumulator,
offset=offset_idx,
slope=slope_idx,
theta_edges=theta_edges,
rho_edges=rho_edges,
n_pixel_ref=n_pixel_ref,
n_pixel_dut=n_pixel_dut,
pixel_size_ref=pixel_size_ref,
pixel_size_dut=pixel_size_dut,
ref_name=ref_name,
dut_name=dut_name,
prefix=table_prefix,
output_pdf=output_pdf,
gui=gui,
figs=figs)
else:
# fill the arrays from above with values
_fit_data(x=x_ref, data=data, s_n=s_n, coeff_fitted=coeff_fitted, mean_fitted=mean_fitted, mean_error_fitted=mean_error_fitted, sigma_fitted=sigma_fitted, chi2=chi2, fit_background=fit_background, reduce_background=reduce_background)
# Convert fit results to metric units for alignment fit
# Origin is center of pixel matrix
x_dut_scaled = (x_dut - 0.5 * n_pixel_dut) * pixel_size_dut
mean_fitted_scaled = (mean_fitted - 0.5 * n_pixel_ref) * pixel_size_ref
mean_error_fitted_scaled = mean_error_fitted * pixel_size_ref
# Selected data arrays
x_selected = x_dut.copy()
x_dut_scaled_selected = x_dut_scaled.copy()
mean_fitted_scaled_selected = mean_fitted_scaled.copy()
mean_error_fitted_scaled_selected = mean_error_fitted_scaled.copy()
sigma_fitted_selected = sigma_fitted.copy()
chi2_selected = chi2.copy()
n_cluster_selected = n_cluster.copy()
# Show the straight line correlation fit including fit errors and offsets from the fit
# Let the user change the cuts (error limit, offset limit) and refit until result looks good
refit = True
selected_data = np.ones_like(x_dut, dtype=np.bool)
actual_iteration = 0 # Refit counter for non interactive mode
while refit:
if gui and not non_interactive:
# Put data in queue to be processed interactively on GUI thread
queue['in'].put([x_dut_scaled_selected, mean_fitted_scaled_selected,
mean_error_fitted_scaled_selected, n_cluster_selected,
ref_name, dut_name, table_prefix])
# Blocking statement to wait for processed data from GUI thread
selected_data, fit, refit = queue['out'].get()
else:
selected_data, fit, refit = plot_utils.plot_prealignments(x=x_dut_scaled_selected,
mean_fitted=mean_fitted_scaled_selected,
mean_error_fitted=mean_error_fitted_scaled_selected,
n_cluster=n_cluster_selected,
ref_name=ref_name,
dut_name=dut_name,
prefix=table_prefix,
non_interactive=non_interactive)
x_selected = x_selected[selected_data]
x_dut_scaled_selected = x_dut_scaled_selected[selected_data]
mean_fitted_scaled_selected = mean_fitted_scaled_selected[selected_data]
mean_error_fitted_scaled_selected = mean_error_fitted_scaled_selected[selected_data]
sigma_fitted_selected = sigma_fitted_selected[selected_data]
chi2_selected = chi2_selected[selected_data]
n_cluster_selected = n_cluster_selected[selected_data]
# Stop in non interactive mode if the number of refits (iterations) is reached
if non_interactive:
actual_iteration += 1
if actual_iteration >= iterations:
break
# Linear fit, usually describes correlation very well, slope is close to 1.
# With a low energy beam and / or a beam with a diverse angular distribution, the correlation will not be perfectly straight
# Use results from straight line fit as start values for this final fit
re_fit, re_fit_pcov = curve_fit(analysis_utils.linear, x_dut_scaled_selected, mean_fitted_scaled_selected, sigma=mean_error_fitted_scaled_selected, absolute_sigma=True, p0=[fit[0], fit[1]])
# Write fit results to array
result[dut_idx][table_prefix + '_c0'], result[dut_idx][table_prefix + '_c0_error'] = re_fit[0], np.absolute(re_fit_pcov[0][0]) ** 0.5
result[dut_idx][table_prefix + '_c1'], result[dut_idx][table_prefix + '_c1_error'] = re_fit[1], np.absolute(re_fit_pcov[1][1]) ** 0.5
result[dut_idx]['z'] = z_positions[dut_idx]
# Calculate mean sigma (is a residual when assuming straight tracks) and its error and store the actual data in result array
# This error is needed for track finding and track quality determination
mean_sigma = pixel_size_ref * np.mean(np.array(sigma_fitted_selected))
mean_sigma_error = pixel_size_ref * np.std(np.array(sigma_fitted_selected)) / np.sqrt(np.array(sigma_fitted_selected).shape[0])
result[dut_idx][table_prefix + '_sigma'], result[dut_idx][table_prefix + '_sigma_error'] = mean_sigma, mean_sigma_error
# Calculate the index of the beam center based on valid indices
plot_index = np.average(x_selected - 1, weights=np.sum(data, axis=1)[np.array(x_selected - 1, dtype=np.int32)])
# Find nearest valid index to the calculated index
idx = (np.abs(x_selected - 1 - plot_index)).argmin()
plot_index = np.array(x_selected - 1, dtype=np.int32)[idx]
x_fit = np.linspace(start=x_ref.min(), stop=x_ref.max(), num=500, endpoint=True)
indices_lower = np.arange(plot_index)
indices_higher = np.arange(plot_index, n_pixel_dut)
alternating_indices = np.vstack((np.hstack([indices_higher, indices_lower[::-1]]), np.hstack([indices_lower[::-1], indices_higher]))).reshape((-1,), order='F')
unique_indices = np.unique(alternating_indices, return_index=True)[1]
alternating_indices = alternating_indices[np.sort(unique_indices)]
for plot_index in alternating_indices:
plot_correlation_fit = False
if coeff_fitted[plot_index] is not None:
plot_correlation_fit = True
break
if plot_correlation_fit:
if np.all(np.isnan(coeff_fitted[plot_index][3:6])):
y_fit = analysis_utils.gauss_offset(x_fit, *coeff_fitted[plot_index][[0, 1, 2, 6]])
fit_label = "Gauss-Offset"
else:
y_fit = analysis_utils.double_gauss_offset(x_fit, *coeff_fitted[plot_index])
fit_label = "Gauss-Gauss-Offset"
plot_utils.plot_correlation_fit(x=x_ref,
y=data[plot_index, :],
x_fit=x_fit,
y_fit=y_fit,
xlabel='%s %s' % ("Column" if "column" in node.name.lower() else "Row", ref_name),
fit_label=fit_label,
title="Correlation of %s: %s vs. %s at %s %d" % (table_prefix + "s", ref_name, dut_name, table_prefix, plot_index),
output_pdf=output_pdf,
gui=gui,
figs=figs)
else:
logging.warning("Cannot plot correlation fit, no fit data available")
# Plot selected data with fit
fit_fn = np.poly1d(re_fit[::-1])
selected_indices = np.searchsorted(x_dut_scaled, x_dut_scaled_selected)
mask = np.zeros_like(x_dut_scaled, dtype=np.bool)
mask[selected_indices] = True
plot_utils.plot_prealignment_fit(x=x_dut_scaled,
mean_fitted=mean_fitted_scaled,
mask=mask,
fit_fn=fit_fn,
fit=re_fit,
pcov=re_fit_pcov,
chi2=chi2,
mean_error_fitted=mean_error_fitted_scaled,
n_cluster=n_cluster,
n_pixel_ref=n_pixel_ref,
n_pixel_dut=n_pixel_dut,
pixel_size_ref=pixel_size_ref,
pixel_size_dut=pixel_size_dut,
ref_name=ref_name,
dut_name=dut_name,
prefix=table_prefix,
output_pdf=output_pdf,
gui=gui,
figs=figs)
if gui and not non_interactive:
queue['in'].put([None]) # Put random element in queue to signal GUI thread end of interactive prealignment
logging.info('Store pre-alignment data in %s', output_alignment_file)
with tb.open_file(output_alignment_file, mode="w") as out_file_h5:
try:
result_table = out_file_h5.create_table(out_file_h5.root, name='PreAlignment', description=result.dtype, title='Prealignment alignment from correlation', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
result_table.append(result)
except tb.exceptions.NodeError:
logging.warning('Coarse alignment table exists already. Do not create new.')
if output_pdf is not None:
output_pdf.close()
if gui:
return figs
def _fit_data(x, data, s_n, coeff_fitted, mean_fitted, mean_error_fitted, sigma_fitted, chi2, fit_background, reduce_background):
def calc_limits_from_fit(x, coeff):
''' Calculates the fit limits from the last successful fit.'''
limits = [
[0.1 * coeff[0], x.min(), 0.5 * coeff[2], 0.01 * coeff[3], x.min(), 0.5 * coeff[5], 0.5 * coeff[6]],
[10.0 * coeff[0], x.max(), 2.0 * coeff[2], 10.0 * coeff[3], x.max(), 2.0 * coeff[5], 2.0 * coeff[6]]
]
# Fix too small sigma, sigma < 1 is unphysical
if limits[1][2] < 1.:
limits[1][2] = 10.
return limits
def signal_sanity_check(coeff, s_n, A_peak):
''' Sanity check whether the signal was separated correctly from the background.
3 conditions:
1. The given signal-to-noise value has to be fulfilled: S/N > amplitude signal / (amplitude background + offset)
2. The signal + background has to be large enough: amplitude 1 + amplitude 2 + offset > data maximum / 2
3. The signal sigma has to be smaller than the background sigma, otherwise the beam would be larger than one pixel pitch
'''
if coeff[0] < (coeff[3] + coeff[6]) * s_n or coeff[0] + coeff[3] + coeff[6] < A_peak / 2.0 or coeff[2] > coeff[5] / 2.0:
return False
return True
n_pixel_dut, n_pixel_ref = data.shape[0], data.shape[1]
# Start values for fitting
# Correlation peak
mu_peak = x[np.argmax(data, axis=1)]
A_peak = np.max(data, axis=1) # signal / correlation peak
# Background of uncorrelated data
n_entries = np.sum(data, axis=1)
A_background = np.mean(data, axis=1) # noise / background halo
mu_background = np.zeros_like(n_entries)
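# Start value for the background mean: entry-weighted mean position of each slice (only where entries exist)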
mu_background[n_entries > 0] = np.average(data, axis=1, weights=x)[n_entries > 0] * np.sum(x) / n_entries[n_entries > 0]
coeff = None
fit_converged = False # Flag signalling that the last fit was good, so its results can be taken as start values for the next fit
# for logging
no_correlation_indices = []
few_correlation_indices = []
# get index of the highest background value
fit_start_index = np.argmax(A_background)
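# Fit the slices outward from this start index: first downward to index 0, then upward to the last index, so each fit can reuse the previous converged result as start values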
indices_lower = np.arange(fit_start_index)[::-1]
indices_higher = np.arange(fit_start_index, n_pixel_dut)
stacked_indices = np.hstack([indices_lower, indices_higher])
for index in stacked_indices: # Loop over x dimension of correlation histogram
if index == fit_start_index:
if index > 0 and coeff_fitted[index - 1] is not None:
coeff = coeff_fitted[index - 1]
fit_converged = True
else:
fit_converged = False
# TODO: start fitting from the beam center to get a higher chance to pick up the correlation peak
# omit correlation fit with no entries / correlation (e.g. sensor edges, masked columns)
if np.all(data[index, :] == 0):
no_correlation_indices.append(index)
continue
# omit correlation fit if sum of correlation entries is < 1 % of total entries divided by number of indices
# (e.g. columns not in the beam)
n_cluster_curr_index = data[index, :].sum()
if fit_converged and n_cluster_curr_index < data.sum() / n_pixel_dut * 0.01:
few_correlation_indices.append(index)
continue
# Set start parameters and fit limits
# Parameters: A_1, mu_1, sigma_1, A_2, mu_2, sigma_2, offset
if fit_converged and not reduce_background: # Set start values from last successful fit, no large difference expected
p0 = coeff # Set start values from last successful fit
bounds = calc_limits_from_fit(x, coeff) # Set boundaries from previous converged fit
else: # No (last) successful fit, try to deduce reasonable start values
p0 = [A_peak[index], mu_peak[index], 5.0, A_background[index], mu_background[index], analysis_utils.get_rms_from_histogram(data[index, :], x), 0.0]
bounds = [[0.0, x.min(), 0.0, 0.0, x.min(), 0.0, 0.0], [2.0 * A_peak[index], x.max(), x.max() - x.min(), 2.0 * A_peak[index], x.max(), np.inf, A_peak[index]]]
# Fit correlation
if fit_background: # Describe background with an additional Gauss + offset
try:
coeff, var_matrix = curve_fit(analysis_utils.double_gauss_offset, x, data[index, :], p0=p0, bounds=bounds)
except RuntimeError: # curve_fit failed
fit_converged = False
else:
fit_converged = True
# do some result checks
if not signal_sanity_check(coeff, s_n, A_peak[index]):
logging.debug('No correlation peak found. Try another fit...')
# Use parameters from last fit as start parameters for the refit
y_fit = analysis_utils.double_gauss_offset(x, *coeff)
try:
coeff, var_matrix = refit_advanced(x_data=x, y_data=data[index, :], y_fit=y_fit, p0=coeff)
except RuntimeError: # curve_fit failed
fit_converged = False
else:
fit_converged = True
# Check result again:
if not signal_sanity_check(coeff, s_n, A_peak[index]):
logging.debug('No correlation peak found after refit!')
fit_converged = False
else: # Describe background with offset only.
# Change start parameters and boundaries
p0_gauss_offset = [p0_val for i, p0_val in enumerate(p0) if i in (0, 1, 2, 6)]
bounds_gauss_offset = [0, np.inf]
bounds_gauss_offset[0] = [bound_val for i, bound_val in enumerate(bounds[0]) if i in (0, 1, 2, 6)]
bounds_gauss_offset[1] = [bound_val for i, bound_val in enumerate(bounds[1]) if i in (0, 1, 2, 6)]
try:
coeff_gauss_offset, var_matrix = curve_fit(analysis_utils.gauss_offset, x, data[index, :], p0=p0_gauss_offset, bounds=bounds_gauss_offset)
except RuntimeError: # curve_fit failed
fit_converged = False
else:
# Correlation should have at least 2 entries to avoid random fluctuation peaks to be selected
if coeff_gauss_offset[0] > 2:
fit_converged = True
# Convert back to the full coefficient set (background parameters set to NaN)
coeff = np.insert(coeff_gauss_offset, 3, [np.nan] * 3) # Parameters: A_1, mu_1, sigma_1, A_2, mu_2, sigma_2, offset
else:
fit_converged = False
# Set fit results for given index if successful
if fit_converged:
coeff_fitted[index] = coeff
mean_fitted[index] = coeff[1]
mean_error_fitted[index] = np.sqrt(np.abs(np.diag(var_matrix)))[1]
sigma_fitted[index] = np.abs(coeff[2])
chi2[index] = analysis_utils.get_chi2(y_data=data[index, :], y_fit=analysis_utils.double_gauss_offset(x, *coeff))
if no_correlation_indices:
logging.info('No correlation entries for indices %s. Omit correlation fit.', str(no_correlation_indices)[1:-1])
if few_correlation_indices:
logging.info('Very few correlation entries for indices %s. Omit correlation fit.', str(few_correlation_indices)[1:-1])
def refit_advanced(x_data, y_data, y_fit, p0):
''' Subtract the fit from the data so that only the small signal peak is left.
Fit this peak, then refit everything with these results as start values'''
y_peak = y_data - y_fit # Fit most likely only describes the background, thus subtract it
peak_A = np.max(y_peak) # Determine start value for amplitude
peak_mu = np.argmax(y_peak) # Determine start value for mu
fwhm_1, fwhm_2 = analysis_utils.fwhm(x_data, y_peak)
peak_sigma = (fwhm_2 - fwhm_1) / 2.35 # Determine start value for sigma
# Fit a Gauss + offset + slope to the background-subtracted data
coeff_peak, _ = curve_fit(analysis_utils.gauss_offset_slope, x_data, y_peak, p0=[peak_A, peak_mu, peak_sigma, 0.0, 0.0], bounds=([0.0, 0.0, 0.0, -10000.0, -10.0], [1.1 * peak_A, np.inf, np.inf, 10000.0, 10.0]))
# Refit the original double Gauss function with proper start values for the small signal peak
coeff, var_matrix = curve_fit(analysis_utils.double_gauss_offset, x_data, y_data, p0=[coeff_peak[0], coeff_peak[1], coeff_peak[2], p0[3], p0[4], p0[5], p0[6]], bounds=[0.0, np.inf])
return coeff, var_matrix
def apply_alignment(input_hit_file, input_alignment_file, output_hit_file, inverse=False,
force_prealignment=False, no_z=False, use_duts=None, chunk_size=1000000):
''' Takes a file with tables containing hit information (x, y, z) and applies the alignment to each DUT hit (positions and errors).
The alignment data is used. If this is not available a fallback to the pre-alignment is done.
One can also invert the alignment or apply the alignment without changing the z position.
Note:
-----
This function cannot be easily made faster with multiprocessing since the computation function (apply_alignment_to_chunk) does not
contribute significantly to the runtime (< 20 %), but the copy overhead for the non-shared memory needed for multiprocessing is higher.
Also the hard drive IO can be limiting (30 Mb/s read, 20 Mb/s write to the same disk)
Parameters
----------
input_hit_file : string
Filename of the input hits file (e.g. merged data file, tracklets file, etc.).
input_alignment_file : string
Filename of the input alignment file.
output_hit_file : string
Filename of the output hits file with hit data after alignment was applied.
inverse : bool
If True, apply the inverse alignment.
force_prealignment : bool
If True, use pre-alignment, even if alignment data is available.
no_z : bool
If True, do not change the z alignment. Needed since the z position is special for x / y based plane measurements.
use_duts : iterable
Iterable of DUT indices to apply the alignment to. If None, use all DUTs.
chunk_size : uint
Chunk size of the data when reading from file.
'''
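# Hypothetical usage (file names are assumptions, not part of this module):
# apply_alignment(input_hit_file='Merged.h5',
#                 input_alignment_file='Alignment.h5',
#                 output_hit_file='Tracklets.h5',
#                 use_duts=[0, 1, 2])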
logging.info('== Apply alignment to %s ==', input_hit_file)
use_prealignment = True if force_prealignment else False
try:
with tb.open_file(input_alignment_file, mode="r") as in_file_h5: # Open file with alignment data
if use_prealignment:
logging.info('Use pre-alignment data')
prealignment = in_file_h5.root.PreAlignment[:]
n_duts = prealignment.shape[0]
else:
logging.info('Use alignment data')
alignment = in_file_h5.root.Alignment[:]
n_duts = alignment.shape[0]
except TypeError: # The input_alignment_file is an array
alignment = input_alignment_file
try: # Check if the array is a pre-alignment array
alignment['column_c0']
logging.info('Use pre-alignment data')
prealignment = alignment # The given array is a pre-alignment array
n_duts = prealignment.shape[0]
use_prealignment = True
except ValueError:
logging.info('Use alignment data')
n_duts = alignment.shape[0]
use_prealignment = False
def apply_alignment_to_chunk(hits_chunk, dut_index, use_prealignment, alignment, inverse, no_z):
if use_prealignment: # Apply transformation from pre-alignment information
(hits_chunk['x_dut_%d' % dut_index],
hits_chunk['y_dut_%d' % dut_index],
hit_z,
hits_chunk['xerr_dut_%d' % dut_index],
hits_chunk['yerr_dut_%d' % dut_index],
hits_chunk['zerr_dut_%d' % dut_index]) = geometry_utils.apply_alignment(
hits_x=hits_chunk['x_dut_%d' % dut_index],
hits_y=hits_chunk['y_dut_%d' % dut_index],
hits_z=hits_chunk['z_dut_%d' % dut_index],
hits_xerr=hits_chunk['xerr_dut_%d' % dut_index],
hits_yerr=hits_chunk['yerr_dut_%d' % dut_index],
hits_zerr=hits_chunk['zerr_dut_%d' % dut_index],
dut_index=dut_index,
prealignment=prealignment,
inverse=inverse)
else: # Apply transformation from fine alignment information
(hits_chunk['x_dut_%d' % dut_index],
hits_chunk['y_dut_%d' % dut_index],
hit_z,
hits_chunk['xerr_dut_%d' % dut_index],
hits_chunk['yerr_dut_%d' % dut_index],
hits_chunk['zerr_dut_%d' % dut_index]) = geometry_utils.apply_alignment(
hits_x=hits_chunk['x_dut_%d' % dut_index],
hits_y=hits_chunk['y_dut_%d' % dut_index],
hits_z=hits_chunk['z_dut_%d' % dut_index],
hits_xerr=hits_chunk['xerr_dut_%d' % dut_index],
hits_yerr=hits_chunk['yerr_dut_%d' % dut_index],
hits_zerr=hits_chunk['zerr_dut_%d' % dut_index],
dut_index=dut_index,
alignment=alignment,
inverse=inverse)
if not no_z:
hits_chunk['z_dut_%d' % dut_index] = hit_z
# Loop over the hits of all DUTs of all hit tables in chunks and apply the alignment
with tb.open_file(input_hit_file, mode='r') as in_file_h5:
with tb.open_file(output_hit_file, mode='w') as out_file_h5:
for node in in_file_h5.root: # Loop over potential hit tables in data file
hits = node
new_node_name = hits.name
if new_node_name == 'MergedCluster': # Merged cluster with alignment are tracklets
new_node_name = 'Tracklets'
hits_aligned_table = out_file_h5.create_table(out_file_h5.root, name=new_node_name, description=np.zeros((1,), dtype=hits.dtype).dtype, title=hits.title, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=hits.shape[0], term_width=80)
progress_bar.start()
for hits_chunk, index in analysis_utils.data_aligned_at_events(hits, chunk_size=chunk_size): # Loop over the hits
for dut_index in range(0, n_duts): # Loop over the DUTs in the hit table
if use_duts is not None and dut_index not in use_duts: # omit DUT
continue
apply_alignment_to_chunk(hits_chunk=hits_chunk, dut_index=dut_index, use_prealignment=use_prealignment, alignment=prealignment if use_prealignment else alignment, inverse=inverse, no_z=no_z)
hits_aligned_table.append(hits_chunk)
progress_bar.update(index)
progress_bar.finish()
logging.debug('File with realigned hits %s', output_hit_file)
def alignment(input_track_candidates_file, input_alignment_file, n_pixels, pixel_size, align_duts=None, selection_fit_duts=None, selection_hit_duts=None, selection_track_quality=1, initial_rotation=None, initial_translation=None, max_iterations=10, use_n_tracks=200000, plot=False, chunk_size=100000):
''' This function does an alignment of the DUTs and sets translation and rotation values for all DUTs.
The reference DUT defines the global coordinate system position at 0, 0, 0 and should be well in the beam and not heavily rotated.
To solve the chicken-and-egg problem that a good DUT alignment needs hits belonging to one track, while good track finding needs a good DUT alignment, this
function works only on already pre-aligned hits belonging to one track. Thus this function can only be called after track finding.
These steps are done
1. Take the found tracks and revert the pre-alignment
2. Take the track hits belonging to one track and fit tracks for all DUTs
3. Calculate the residuals for each DUT
4. Deduce rotations from the residuals and apply them to the hits
5. Deduce the translation of each plane
6. Store and apply the new alignment
Repeat steps 3 - 6 until the total residual does not decrease anymore (RMS_total = sqrt(RMS_x_1^2 + RMS_y_1^2 + RMS_x_2^2 + RMS_y_2^2 + ...))
Parameters
----------
input_track_candidates_file : string
file name with the track candidates table
input_alignment_file : pytables file
File name of the input alignment data
n_pixels : iterable of tuples
One tuple per DUT describing the total number of pixels (column/row),
e.g. for two FE-I4 DUTs [(80, 336), (80, 336)].
pixel_size : iterable of tuples
One tuple per DUT describing the pixel dimension (column/row),
e.g. for two FE-I4 DUTs [(250, 50), (250, 50)].
align_duts : iterable or iterable of iterable
The combination of DUTs that are aligned at once. One should always align the high resolution planes first.
E.g. for a telescope (first and last 3 planes) with 2 devices in the center (3, 4):
align_duts=[[0, 1, 2, 5, 6, 7], # align the telescope planes first
[4], # Align first DUT
[3]], # Align second DUT
selection_fit_duts : iterable or iterable of iterable
Defines for each align_duts combination which devices to use in the track fit.
E.g. To use only the telescope planes (first and last 3 planes) but not the 2 center devices
selection_fit_duts=[0, 1, 2, 5, 6, 7]
selection_hit_duts : iterable or iterable of iterable
Defines for each align_duts combination which devices must have a hit to use the track for fitting. The hit
does not have to be used in the fit itself! This is useful for time reference planes.
E.g. To use telescope planes (first and last 3 planes) + time reference plane (3)
selection_hit_duts = [0, 1, 2, 4, 5, 6, 7]
selection_track_quality : uint or iterable or iterable of iterable
Track quality for each hit DUT.
initial_rotation : array
Initial rotation array.
initial_translation : array
Initial translation array.
max_iterations : uint
Maximum number of iterations of the calculate-residuals / apply-rotation / refit loop until a constant result is expected.
Usually the procedure converges rather quickly (< 5 iterations).
use_n_tracks : uint
Defines the amount of tracks to be used for the alignment. More tracks can potentially make the result
more precise, but will also increase the calculation time.
plot : bool
If True, create additional output plots.
chunk_size : uint
Chunk size of the data when reading from file.
'''
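# Hypothetical usage for a telescope with 7 FE-I4-like planes (file names and geometry are assumptions):
# alignment(input_track_candidates_file='TrackCandidates_prealigned.h5',
#           input_alignment_file='Alignment.h5',
#           n_pixels=[(80, 336)] * 7,
#           pixel_size=[(250, 50)] * 7,
#           align_duts=[[0, 1, 2, 4, 5, 6], [3]])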
logging.info('=== Aligning DUTs ===')
# Open the pre-alignment and create empty alignment info (at the beginning only the z position is set)
with tb.open_file(input_alignment_file, mode="r") as in_file_h5: # Open file with alignment data
prealignment = in_file_h5.root.PreAlignment[:]
n_duts = prealignment.shape[0]
alignment_parameters = _create_alignment_array(n_duts)
alignment_parameters['translation_z'] = prealignment['z']
if initial_rotation:
if isinstance(initial_rotation[0], Iterable):
for dut_index in range(n_duts):
alignment_parameters['alpha'][dut_index] = initial_rotation[dut_index][0]
alignment_parameters['beta'][dut_index] = initial_rotation[dut_index][1]
alignment_parameters['gamma'][dut_index] = initial_rotation[dut_index][2]
else:
for dut_index in range(n_duts):
alignment_parameters['alpha'][dut_index] = initial_rotation[0]
alignment_parameters['beta'][dut_index] = initial_rotation[1]
alignment_parameters['gamma'][dut_index] = initial_rotation[2]
if initial_translation:
if isinstance(initial_translation[0], Iterable):
for dut_index in range(n_duts):
alignment_parameters['translation_x'][dut_index] = initial_translation[dut_index][0]
alignment_parameters['translation_y'][dut_index] = initial_translation[dut_index][1]
else:
for dut_index in range(n_duts):
alignment_parameters['translation_x'][dut_index] = initial_translation[0]
alignment_parameters['translation_y'][dut_index] = initial_translation[1]
if np.any(np.abs(alignment_parameters['alpha']) > np.pi / 4.) or np.any(np.abs(alignment_parameters['beta']) > np.pi / 4.) or np.any(np.abs(alignment_parameters['gamma']) > np.pi / 4.):
logging.warning('Rotation angles > pi / 4 are not supported; set the correct angle and translation as start parameters!')
geometry_utils.store_alignment_parameters(
input_alignment_file,
alignment_parameters=alignment_parameters,
mode='absolute')
# Create list with combinations of DUTs to align
if align_duts is None: # If None: align all DUTs
align_duts = range(n_duts)
# Check for value errors
if not isinstance(align_duts, Iterable):
raise ValueError("align_duts is no iterable")
elif not align_duts: # empty iterable
raise ValueError("align_duts has no items")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), align_duts)):
align_duts = [align_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), align_duts)):
raise ValueError("not all items in align_duts are iterable")
# Finally check length of all iterables in iterable
for dut in align_duts:
if not dut: # check the length of the items
raise ValueError("item in align_duts has length 0")
# Check if some DUTs will not be aligned
all_align_duts = []
for duts in align_duts:
all_align_duts.extend(duts)
no_align_duts = set(range(n_duts)) - set(all_align_duts)
if no_align_duts:
logging.warning('These DUTs will not be aligned: %s', ", ".join(str(align_dut) for align_dut in no_align_duts))
# Create track, hit selection
if selection_hit_duts is None: # If None: use all DUTs
selection_hit_duts = []
# copy each item
for duts in align_duts:
selection_hit_duts.append(duts[:]) # require a hit for each fit DUT
# Check iterable and length
if not isinstance(selection_hit_duts, Iterable):
raise ValueError("selection_hit_duts is no iterable")
elif not selection_hit_duts: # empty iterable
raise ValueError("selection_hit_duts has no items")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), selection_hit_duts)):
selection_hit_duts = [selection_hit_duts[:] for _ in align_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), selection_hit_duts)):
raise ValueError("not all items in selection_hit_duts are iterable")
# Finally check length of all arrays
if len(selection_hit_duts) != len(align_duts): # empty iterable
raise ValueError("selection_hit_duts has the wrong length")
for hit_dut in selection_hit_duts:
if len(hit_dut) < 2: # check the length of the items
raise ValueError("item in selection_hit_duts has length < 2")
# Create track, hit selection
if selection_fit_duts is None: # If None: use all DUTs
selection_fit_duts = []
# copy each item
for hit_duts in selection_hit_duts:
selection_fit_duts.append(hit_duts[:]) # require a hit for each fit DUT
# Check iterable and length
if not isinstance(selection_fit_duts, Iterable):
raise ValueError("selection_fit_duts is no iterable")
elif not selection_fit_duts: # empty iterable
raise ValueError("selection_fit_duts has no items")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), selection_fit_duts)):
selection_fit_duts = [selection_fit_duts[:] for _ in align_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), selection_fit_duts)):
raise ValueError("not all items in selection_fit_duts are iterable")
# Finally check length of all arrays
if len(selection_fit_duts) != len(align_duts): # empty iterable
raise ValueError("selection_fit_duts has the wrong length")
for index, fit_dut in enumerate(selection_fit_duts):
if len(fit_dut) < 2: # check the length of the items
raise ValueError("item in selection_fit_duts has length < 2")
if set(fit_dut) - set(selection_hit_duts[index]): # fit DUTs are required to have a hit
raise ValueError("DUT in selection_fit_duts is not in selection_hit_duts")
# Create track, hit selection
if not isinstance(selection_track_quality, Iterable): # all items the same, special case for selection_track_quality
selection_track_quality = [[selection_track_quality] * len(hit_duts) for hit_duts in selection_hit_duts] # every hit DUTs require a track quality value
# Check iterable and length
if not isinstance(selection_track_quality, Iterable):
raise ValueError("selection_track_quality is no iterable")
elif not selection_track_quality: # empty iterable
raise ValueError("selection_track_quality has no items")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), selection_track_quality)):
selection_track_quality = [selection_track_quality for _ in align_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), selection_track_quality)):
raise ValueError("not all items in selection_track_quality are iterable")
# Finally check length of all arrays
if len(selection_track_quality) != len(align_duts): # empty iterable
raise ValueError("selection_track_quality has the wrong length")
for index, track_quality in enumerate(selection_track_quality):
if len(track_quality) != len(selection_hit_duts[index]): # check the length of each items
raise ValueError("item in selection_track_quality and selection_hit_duts does not have the same length")
# Loop over all combinations of DUTs to align, simplest case: use all DUTs at once to align
# Usual case: align high resolution devices first, then other devices
for index, actual_align_duts in enumerate(align_duts):
logging.info('Aligning DUTs: %s', ", ".join(str(dut) for dut in actual_align_duts))
_duts_alignment(
track_candidates_file=input_track_candidates_file,
alignment_file=input_alignment_file,
alignment_index=index,
align_duts=actual_align_duts,
selection_fit_duts=selection_fit_duts[index],
selection_hit_duts=selection_hit_duts[index],
selection_track_quality=selection_track_quality[index],
n_pixels=n_pixels,
pixel_size=pixel_size,
use_n_tracks=use_n_tracks,
n_duts=n_duts,
max_iterations=max_iterations,
plot=plot,
chunk_size=chunk_size)
logging.info('Alignment finished successfully!')
def _duts_alignment(track_candidates_file, alignment_file, alignment_index, align_duts, selection_fit_duts, selection_hit_duts, selection_track_quality, n_pixels, pixel_size, use_n_tracks, n_duts, max_iterations, plot=True, chunk_size=100000): # Called for each list of DUTs to align
# Step 0: Reduce the number of tracks to decrease the calculation time
logging.info('= Alignment step 0: Reduce number of tracks to %d =', use_n_tracks)
track_quality_mask = 0
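# The track quality word stores one bit per DUT in 8-bit blocks, one block per quality level; set the DUT bit in every block up to the required quality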
for index, dut in enumerate(selection_hit_duts):
for quality in range(3):
if quality <= selection_track_quality[index]:
track_quality_mask |= ((1 << dut) << quality * 8)
logging.info('Use tracks with hits in DUTs %s', str(selection_hit_duts)[1:-1])
data_selection.select_hits(hit_file=track_candidates_file,
output_file=os.path.splitext(track_candidates_file)[0] + '_reduced_%d.h5' % alignment_index,
max_hits=use_n_tracks,
track_quality=track_quality_mask,
track_quality_mask=track_quality_mask,
chunk_size=chunk_size)
track_candidates_reduced = os.path.splitext(track_candidates_file)[0] + '_reduced_%d.h5' % alignment_index
# Step 1: Take the found tracks and revert the pre-alignment to start alignment from the beginning
logging.info('= Alignment step 1: Revert pre-alignment =')
apply_alignment(input_hit_file=track_candidates_reduced,
input_alignment_file=alignment_file, # Revert prealignent
output_hit_file=os.path.splitext(track_candidates_reduced)[0] + '_not_aligned.h5',
inverse=True,
force_prealignment=True,
chunk_size=chunk_size)
# Stage N: Repeat alignment with constrained residuals until total residual does not decrease anymore
_calculate_translation_alignment(track_candidates_file=os.path.splitext(track_candidates_reduced)[0] + '_not_aligned.h5',
alignment_file=alignment_file,
fit_duts=align_duts,
selection_fit_duts=selection_fit_duts,
selection_hit_duts=selection_hit_duts,
selection_track_quality=selection_track_quality,
n_pixels=n_pixels,
pixel_size=pixel_size,
n_duts=n_duts,
max_iterations=max_iterations,
plot_title_prefix='',
output_pdf=None,
chunk_size=chunk_size)
# Plot final result
if plot:
logging.info('= Alignment step 7: Plot final result =')
with PdfPages(os.path.join(os.path.dirname(os.path.realpath(track_candidates_file)), 'Alignment_%d.pdf' % alignment_index), keep_empty=False) as output_pdf:
# Apply final alignment result
apply_alignment(input_hit_file=os.path.splitext(track_candidates_reduced)[0] + '_not_aligned.h5',
input_alignment_file=alignment_file,
output_hit_file=os.path.splitext(track_candidates_file)[0] + '_final_tmp_%s.h5' % alignment_index,
chunk_size=chunk_size)
fit_tracks(input_track_candidates_file=os.path.splitext(track_candidates_file)[0] + '_final_tmp_%d.h5' % alignment_index,
input_alignment_file=alignment_file,
output_tracks_file=os.path.splitext(track_candidates_file)[0] + '_tracks_final_tmp_%d.h5' % alignment_index,
fit_duts=align_duts, # Only create residuals of selected DUTs
selection_fit_duts=selection_fit_duts, # Only use selected duts
selection_hit_duts=selection_hit_duts,
exclude_dut_hit=True, # For unconstrained residuals
selection_track_quality=selection_track_quality,
chunk_size=chunk_size)
calculate_residuals(input_tracks_file=os.path.splitext(track_candidates_file)[0] + '_tracks_final_tmp_%d.h5' % alignment_index,
input_alignment_file=alignment_file,
output_residuals_file=os.path.splitext(track_candidates_file)[0] + '_residuals_final_tmp_%d.h5' % alignment_index,
n_pixels=n_pixels,
pixel_size=pixel_size,
plot=plot,
chunk_size=chunk_size)
os.remove(os.path.splitext(track_candidates_file)[0] + '_final_tmp_%d.h5' % alignment_index)
os.remove(os.path.splitext(track_candidates_file)[0] + '_tracks_final_tmp_%d.h5' % alignment_index)
os.remove(os.path.splitext(track_candidates_file)[0] + '_tracks_final_tmp_%d.pdf' % alignment_index)
os.remove(os.path.splitext(track_candidates_file)[0] + '_residuals_final_tmp_%d.h5' % alignment_index)
os.remove(os.path.splitext(track_candidates_reduced)[0] + '_not_aligned.h5')
os.remove(os.path.splitext(track_candidates_file)[0] + '_reduced_%d.h5' % alignment_index)
def _calculate_translation_alignment(track_candidates_file, alignment_file, fit_duts, selection_fit_duts, selection_hit_duts, selection_track_quality, n_pixels, pixel_size, n_duts, max_iterations, plot_title_prefix='', output_pdf=None, chunk_size=100000):
''' Main function that fits tracks, calculates the residuals, deduces rotation and translation values from the residuals
and applies the new alignment to the track hits. The alignment result is scored as a combined
residual value of all planes that are being aligned in x and y weighted by the pixel pitch in x and y. '''
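# The iteration below stops as soon as the combined residual (quadratic sum of the residual widths normalized to the pixel pitch
# in x and y of all aligned planes) stops decreasing; the alignment from the previous iteration is then kept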
with tb.open_file(alignment_file, mode="r") as in_file_h5: # Open file with alignment data
alignment_last_iteration = in_file_h5.root.Alignment[:]
total_residual = None
for iteration in range(max_iterations):
if iteration >= max_iterations:
raise RuntimeError('Did not converge to a good solution in %d iterations. Increase max_iterations.' % iteration)
apply_alignment(input_hit_file=track_candidates_file, # Always apply alignment to starting file
input_alignment_file=alignment_file,
output_hit_file=os.path.splitext(track_candidates_file)[0] + '_no_align_%d_tmp.h5' % iteration,
inverse=False,
force_prealignment=False,
chunk_size=chunk_size)
# Step 2: Fit tracks for all DUTs
logging.info('= Alignment step 2 / iteration %d: Fit tracks for all DUTs =', iteration)
fit_tracks(input_track_candidates_file=os.path.splitext(track_candidates_file)[0] + '_no_align_%d_tmp.h5' % iteration,
input_alignment_file=alignment_file,
output_tracks_file=os.path.splitext(track_candidates_file)[0] + '_tracks_%d_tmp.h5' % iteration,
fit_duts=fit_duts, # Only create residuals of selected DUTs
selection_fit_duts=selection_fit_duts, # Only use selected DUTs for track fit
selection_hit_duts=selection_hit_duts, # Only use selected duts
exclude_dut_hit=False, # For constrained residuals
selection_track_quality=selection_track_quality,
force_prealignment=False,
chunk_size=chunk_size)
# Step 3: Calculate the residuals for each DUT
logging.info('= Alignment step 3 / iteration %d: Calculate the residuals for each selected DUT =', iteration)
calculate_residuals(input_tracks_file=os.path.splitext(track_candidates_file)[0] + '_tracks_%d_tmp.h5' % iteration,
input_alignment_file=alignment_file,
output_residuals_file=os.path.splitext(track_candidates_file)[0] + '_residuals_%d_tmp.h5' % iteration,
n_pixels=n_pixels,
pixel_size=pixel_size,
# smaller devices need None; otherwise npixels_per_bin=5 and nbins_per_pixel=1 might improve the first step
npixels_per_bin=None,
nbins_per_pixel=None,
plot=False,
chunk_size=chunk_size)
# Step 4: Deduce rotations from the residuals
logging.info('= Alignment step 4 / iteration %d: Deduce rotations and translations from the residuals =', iteration)
alignment_parameters_change, new_total_residual = _analyze_residuals(residuals_file=os.path.splitext(track_candidates_file)[0] + '_residuals_%d_tmp.h5' % iteration,
fit_duts=fit_duts,
pixel_size=pixel_size,
n_duts=n_duts,
translation_only=False,
plot_title_prefix=plot_title_prefix,
relaxation_factor=1.0, # FIXME: good code practice: nothing hardcoded
output_pdf=output_pdf)
# Create actual alignment (old alignment + the actual relative change)
new_alignment_parameters = geometry_utils.merge_alignment_parameters(
alignment_last_iteration,
alignment_parameters_change,
select_duts=fit_duts,
mode='relative')
# FIXME: This step does not work well
# # Step 5: Try to find better rotation by minimizing the residual in x + y for different angles
# logging.info('= Alignment step 5 / iteration %d: Optimize alignment by minimizing residuals =', iteration)
# new_alignment_parameters, new_total_residual = _optimize_alignment(tracks_file=os.path.splitext(track_candidates_file)[0] + '_tracks_%d_tmp.h5' % iteration,
# alignment_last_iteration=alignment_last_iteration,
# new_alignment_parameters=new_alignment_parameters,
# pixel_size=pixel_size)
# Delete not needed files
os.remove(os.path.splitext(track_candidates_file)[0] + '_no_align_%d_tmp.h5' % iteration)
os.remove(os.path.splitext(track_candidates_file)[0] + '_tracks_%d_tmp.h5' % iteration)
os.remove(os.path.splitext(track_candidates_file)[0] + '_tracks_%d_tmp.pdf' % iteration)
os.remove(os.path.splitext(track_candidates_file)[0] + '_residuals_%d_tmp.h5' % iteration)
logging.info('Total residual %1.4e', new_total_residual)
if total_residual is not None and new_total_residual > total_residual: # True if the current alignment is worse than the alignment from the last iteration
logging.info('!! Best alignment found !!')
logging.info('= Alignment step 6 / iteration %d: Use rotation / translation information from previous iteration =', iteration)
geometry_utils.store_alignment_parameters(alignment_file, # Store alignment from last iteration
alignment_last_iteration,
mode='absolute',
select_duts=fit_duts)
return
else:
total_residual = new_total_residual
alignment_last_iteration = new_alignment_parameters.copy() # in_file_h5.root.Alignment[:]
logging.info('= Alignment step 6 / iteration %d: Set new rotation / translation information in alignment file =', iteration)
geometry_utils.store_alignment_parameters(alignment_file,
new_alignment_parameters,
mode='absolute',
select_duts=fit_duts)
# Helper functions for the alignment. Not to be used directly.
def _create_alignment_array(n_duts):
# Result Translation / rotation table
description = [('DUT', np.int32)]
description.append(('translation_x', np.float))
description.append(('translation_y', np.float))
description.append(('translation_z', np.float))
description.append(('alpha', np.float))
description.append(('beta', np.float))
description.append(('gamma', np.float))
description.append(('correlation_x', np.float))
description.append(('correlation_y', np.float))
array = np.zeros((n_duts,), dtype=description)
array[:]['DUT'] = np.array(range(n_duts))
return array
def _analyze_residuals(residuals_file, fit_duts, pixel_size, n_duts, translation_only=False, relaxation_factor=1.0, plot_title_prefix='', output_pdf=None):
''' Take the residual distributions and deduce rotations and translations from them '''
alignment_parameters = _create_alignment_array(n_duts)
total_residual = 0 # Sum of all residuals to judge the overall alignment
with tb.open_file(residuals_file) as in_file_h5:
for dut_index in fit_duts:
alignment_parameters[dut_index]['DUT'] = dut_index
# Global residuals
hist_node = in_file_h5.get_node('/ResidualsX_DUT%d' % dut_index)
std_x = hist_node._v_attrs.fit_coeff[2]
# Add residual to the total residual, normalized to the pixel pitch in x
total_residual = np.sqrt(np.square(total_residual) + np.square(std_x / pixel_size[dut_index][0]))
if output_pdf is not None:
plot_utils.plot_residuals(histogram=hist_node[:],
edges=hist_node._v_attrs.xedges,
fit=hist_node._v_attrs.fit_coeff,
fit_errors=hist_node._v_attrs.fit_cov,
title='Residuals for DUT%d' % dut_index,
x_label='X residual [um]',
output_pdf=output_pdf)
hist_node = in_file_h5.get_node('/ResidualsY_DUT%d' % dut_index)
std_y = hist_node._v_attrs.fit_coeff[2]
# Add residual to the total residual, normalized to the pixel pitch in y
total_residual = np.sqrt(np.square(total_residual) + np.square(std_y / pixel_size[dut_index][1]))
if translation_only:
return alignment_parameters, total_residual
if output_pdf is not None:
plot_utils.plot_residuals(histogram=hist_node[:],
edges=hist_node._v_attrs.xedges,
fit=hist_node._v_attrs.fit_coeff,
fit_errors=hist_node._v_attrs.fit_cov,
title='Residuals for DUT%d' % dut_index,
x_label='Y residual [um]',
output_pdf=output_pdf)
# use offset at origin of sensor (center of sensor) to calculate x and y correction
# do not use mean/median of 1D residual since it depends on the beam spot position when the device is rotated
mu_x = in_file_h5.get_node_attr('/YResidualsX_DUT%d' % dut_index, 'fit_coeff')[0]
mu_y = in_file_h5.get_node_attr('/XResidualsY_DUT%d' % dut_index, 'fit_coeff')[0]
# use slope to calculate alpha, beta and gamma
m_xx = in_file_h5.get_node_attr('/XResidualsX_DUT%d' % dut_index, 'fit_coeff')[1]
m_yy = in_file_h5.get_node_attr('/YResidualsY_DUT%d' % dut_index, 'fit_coeff')[1]
m_xy = in_file_h5.get_node_attr('/XResidualsY_DUT%d' % dut_index, 'fit_coeff')[1]
m_yx = in_file_h5.get_node_attr('/YResidualsX_DUT%d' % dut_index, 'fit_coeff')[1]
alpha, beta, gamma = analysis_utils.get_rotation_from_residual_fit(m_xx=m_xx, m_xy=m_xy, m_yx=m_yx, m_yy=m_yy)
alignment_parameters[dut_index]['correlation_x'] = std_x
alignment_parameters[dut_index]['translation_x'] = -mu_x
alignment_parameters[dut_index]['correlation_y'] = std_y
alignment_parameters[dut_index]['translation_y'] = -mu_y
alignment_parameters[dut_index]['alpha'] = alpha * relaxation_factor
alignment_parameters[dut_index]['beta'] = beta * relaxation_factor
alignment_parameters[dut_index]['gamma'] = gamma * relaxation_factor
return alignment_parameters, total_residual
def _optimize_alignment(tracks_file, alignment_last_iteration, new_alignment_parameters, pixel_size):
''' Changes the angles of a virtual plane such that the projected track intersections onto this virtual plane
are closest to the measured hits on the real DUT at this position. Then the angles of the virtual plane
should correspond to the real DUT angles. The distance is not weighted quadratically (RMS) but linearly since
this leads to better results (most likely heavily scattered tracks / beam angle spread at the edges are weighted less).'''
# Create new absolute alignment
alignment_result = new_alignment_parameters
def _minimize_me(align, dut_position, hit_x_local, hit_y_local, hit_z_local, pixel_size, offsets, slopes):
# Calculate intersections with a dut plane given by alpha, beta, gamma at the dut_position in the global coordinate system
rotation_matrix = geometry_utils.rotation_matrix(alpha=align[0],
beta=align[1],
gamma=align[2])
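# The DUT plane normal in global coordinates is the rotated local z axis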
basis_global = rotation_matrix.T.dot(np.eye(3))
dut_plane_normal = basis_global[2]
actual_dut_position = dut_position.copy()
actual_dut_position[2] = align[3] * 1e6 # Convert z position from m to um
intersections = geometry_utils.get_line_intersections_with_plane(line_origins=offsets,
line_directions=slopes,
position_plane=actual_dut_position,
normal_plane=dut_plane_normal)
# Transform to the local coordinate system to compare with measured hits
transformation_matrix = geometry_utils.global_to_local_transformation_matrix(x=actual_dut_position[0],
y=actual_dut_position[1],
z=actual_dut_position[2],
alpha=align[0],
beta=align[1],
gamma=align[2])
intersection_x_local, intersection_y_local, intersection_z_local = geometry_utils.apply_transformation_matrix(x=intersections[:, 0],
y=intersections[:, 1],
z=intersections[:, 2],
transformation_matrix=transformation_matrix)
# Cross check if transformations are correct (z == 0 in the local coordinate system)
if not np.allclose(hit_z_local[np.isfinite(hit_z_local)], 0) or not np.allclose(intersection_z_local, 0):
logging.error('Hit z position = %s and z intersection %s',
str(hit_z_local[~np.isclose(hit_z_local, 0)][:3]),
str(intersection_z_local[~np.isclose(intersection_z_local, 0)][:3]))
raise RuntimeError('The transformation to the local coordinate system did not give all z = 0. Wrong alignment used?')
return np.sum(np.abs(hit_x_local - intersection_x_local) / pixel_size[0]) + np.sum(np.abs(hit_y_local - intersection_y_local)) / pixel_size[1]
# return np.sqrt(np.square(np.std(hit_x_local - intersection_x_local) / pixel_size[0]) + np.square(np.std(hit_y_local - intersection_y_local)) / pixel_size[1])
with tb.open_file(tracks_file, mode='r') as in_file_h5:
residuals_before = []
residuals_after = []
for node in in_file_h5.root:
actual_dut = int(re.findall(r'\d+', node.name)[-1])
dut_position = np.array([alignment_last_iteration[actual_dut]['translation_x'], alignment_last_iteration[actual_dut]['translation_y'], alignment_last_iteration[actual_dut]['translation_z']])
# Hits with the actual alignment
hits = np.column_stack((node[:]['x_dut_%d' % actual_dut], node[:]['y_dut_%d' % actual_dut], node[:]['z_dut_%d' % actual_dut]))
# Transform hits to the local coordinate system
hit_x_local, hit_y_local, hit_z_local = geometry_utils.apply_alignment(hits_x=hits[:, 0],
hits_y=hits[:, 1],
hits_z=hits[:, 2],
dut_index=actual_dut,
alignment=alignment_last_iteration,
inverse=True)
# Track infos
offsets = np.column_stack((node[:]['offset_0'], node[:]['offset_1'], node[:]['offset_2']))
slopes = np.column_stack((node[:]['slope_0'], node[:]['slope_1'], node[:]['slope_2']))
# Rotation start values of minimizer
alpha = alignment_result[actual_dut]['alpha']
beta = alignment_result[actual_dut]['beta']
gamma = alignment_result[actual_dut]['gamma']
z_position = alignment_result[actual_dut]['translation_z']
# Trick to have the same order of magnitude of variation for angles and position, otherwise scipy minimizers
# do not converge if the step sizes of the parameters are very different
z_position_in_m = z_position / 1e6
residual = _minimize_me(np.array([alpha, beta, gamma, z_position_in_m]),
dut_position,
hit_x_local,
hit_y_local,
hit_z_local,
pixel_size[actual_dut],
offsets,
slopes)
residuals_before.append(residual)
logging.info('Optimize angles / z of DUT%d with start parameters: %1.2e, %1.2e, %1.2e Rad and z = %d um with residual %1.2e' % (actual_dut,
alpha,
beta,
gamma,
z_position_in_m * 1e6,
residual))
# FIXME:
# Has to be heavily restricted, otherwise the minimizer converges to unphysical solutions since the residual-based scoring is not working well
bounds = [(alpha - 0.01, alpha + 0.01), (beta - 0.01, beta + 0.01), (gamma - 0.001, gamma + 0.001), (z_position_in_m - 10e-6, z_position_in_m + 10e-6)]
result = minimize(fun=_minimize_me,
x0=np.array([alpha, beta, gamma, z_position_in_m]), # Start values from residual fit
args=(dut_position, hit_x_local, hit_y_local, hit_z_local, pixel_size[actual_dut], offsets, slopes),
bounds=bounds,
method='SLSQP')
alpha, beta, gamma, z_position_in_m = result.x
residual = _minimize_me(result.x,
dut_position,
hit_x_local,
hit_y_local,
hit_z_local,
pixel_size[actual_dut],
offsets,
slopes)
residuals_after.append(residual)
logging.info('Best angles found for DUT%d: %1.2e, %1.2e, %1.2e Rad and z = %d um with residual %1.2e' % (actual_dut,
alpha,
beta,
gamma,
z_position_in_m * 1e6,
residual))
# Store the optimized rotation angles / z position in the alignment result
alignment_result[actual_dut]['alpha'] = alpha
alignment_result[actual_dut]['beta'] = beta
alignment_result[actual_dut]['gamma'] = gamma
alignment_result[actual_dut]['translation_z'] = z_position_in_m * 1e6 # convert z position from m to um
total_residuals_before = np.sqrt(np.sum(np.square(np.array(residuals_before))))
total_residuals_after = np.sqrt(np.sum(np.square(np.array(residuals_after))))
logging.info('Reduced the total residuals in the optimization steps from %1.2e to %1.2e', total_residuals_before, total_residuals_after)
if total_residuals_before < total_residuals_after:
raise RuntimeError('Alignment optimization did not converge!')
return alignment_result, total_residuals_after # Return alignment result and total residual
# Helper functions to be called from multiple processes
def _correlate_cluster(cluster_dut_0, cluster_file, start_index, start_event_number, stop_event_number, column_correlation, row_correlation, chunk_size):
with tb.open_file(cluster_file, mode='r') as actual_in_file_h5: # Open other DUT cluster file
for actual_dut_cluster, start_index in analysis_utils.data_aligned_at_events(actual_in_file_h5.root.Cluster, start_index=start_index, start_event_number=start_event_number, stop_event_number=stop_event_number, chunk_size=chunk_size, fail_on_missing_events=False): # Loop over the cluster in the actual cluster file in chunks
analysis_utils.correlate_cluster_on_event_number(data_1=cluster_dut_0,
data_2=actual_dut_cluster,
column_corr_hist=column_correlation,
row_corr_hist=row_correlation)
return start_index, column_correlation, row_correlation
| mit |
nmayorov/scikit-learn | sklearn/linear_model/setup.py | 146 | 1713 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_extension('sag_fast',
sources=['sag_fast.c'],
include_dirs=numpy.get_include())
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
ron1818/Singaboat_RobotX2016 | robotx_nav/nodes/task7_non_process_2.py | 1 | 8074 | #!/usr/bin/env python
""" Mission 7-Detect and Deliver
1. Random walk with gaussian at center of map until station position is acquired
2. loiter around until correct face seen
3. if symbol seen, move towards symbol perpendicularly
4. if close enough, do move_base aiming
task 7:
-----------------
Created by Reinaldo@ 2016-12-07
Authors: Reinaldo
-----------------
"""
import rospy
import multiprocessing as mp
import math
import time
import numpy as np
import os
import tf
import random
from sklearn.cluster import KMeans
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion
from visualization_msgs.msg import MarkerArray, Marker
from move_base_forward import Forward
from move_base_waypoint import MoveTo
from move_base_loiter import Loiter
from move_base_stationkeeping import StationKeeping
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from std_msgs.msg import Int8
class DetectDeliver(object):
map_dim = [[0, 40], [0, 40]]
MAX_DATA=10
x0, y0, yaw0= 0, 0, 0
symbol=[0 , 0]
symbols=np.zeros((MAX_DATA, 2)) #unordered list
symbols_counter=0
angle_threshold=10*math.pi/180
symbol_location=np.zeros((MAX_DATA, 2))
shape_counter=0
distance_to_box=3
def __init__(self, symbol_list):
print("starting task 7")
rospy.init_node('task_7', anonymous=True)
self.symbol=symbol_list
self.symbol_visited=0
self.symbol_seen=False
self.symbol_position=[0, 0, 0]
self.station_seen=False #station here is cluster center of any face
self.station_position=[0, 0]
self.loiter_obj = Loiter("loiter", is_newnode=False, target=None, radius=5, polygon=4, mode=2, mode_param=1, is_relative=False)
self.moveto_obj = MoveTo("moveto", is_newnode=False, target=None, is_relative=False)
self.stationkeep_obj = StationKeeping("station_keeping", is_newnode=False, target=None, radius=2, duration=30)
rospy.Subscriber("/filtered_marker_array", MarkerArray, self.symbol_callback, queue_size = 50)
rospy.Subscriber("/finished_search_and_shoot", Int8, self.stop_shoot_callback, queue_size = 5)
self.shooting_pub= rospy.Publisher('/start_search_and_shoot', Int8, queue_size=5)
self.marker_pub= rospy.Publisher('/waypoint_markers', Marker, queue_size=5)
self.base_frame = rospy.get_param("~base_frame", "base_link")
self.fixed_frame = rospy.get_param("~fixed_frame", "map")
# tf_listener
self.tf_listener = tf.TransformListener()
self.odom_received = False
rospy.wait_for_message("/odometry/filtered/global", Odometry)
rospy.Subscriber("/odometry/filtered/global", Odometry, self.odom_callback, queue_size=50)
while not self.odom_received:
rospy.sleep(1)
print("odom received")
print(self.symbol)
while not rospy.is_shutdown() and not self.station_seen:
self.moveto_obj.respawn(self.random_walk(), )#forward
print("station: ")
print(self.station_position)
#loiter around station until symbol's face seen
loiter_radius=math.sqrt((self.x0-self.station_position[0])**2+(self.y0-self.station_position[1])**2)
if loiter_radius>10:
loiter_radius=10
while not rospy.is_shutdown():
print(loiter_radius)
self.loiter_obj.respawn(self.station_position, loiter_radius, )
if loiter_radius>4:
loiter_radius-=2
if self.symbol_seen:
print(self.symbol_position)
print("symbol's position acquired, exit loitering")
break
time.sleep(1)
print(self.symbol_position)
d=math.sqrt((self.x0-self.symbol_position[0])**2+(self.y0-self.symbol_position[1])**2)
counter=0
print(d)
#moveto an offset, replan in the way
while not rospy.is_shutdown():
alpha=self.yaw0-self.symbol_position[2]
theta=math.atan2(math.fabs(math.sin(alpha)), math.fabs(math.cos(alpha))) #always +ve and 0-pi/2
d=math.sqrt((self.x0-self.symbol_position[0])**2+(self.y0-self.symbol_position[1])**2)
perpendicular_d=0.6*d*math.cos(theta)
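# The goal is placed in front of the symbol face, offset along the face orientation, so that the boat approaches roughly head-on;
# replan whenever the approach angle or the distance grows too large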
if counter ==0 or theta>self.angle_threshold or d>self.distance_to_box:
print("replan")
target=[self.symbol_position[0]+perpendicular_d*math.cos(self.symbol_position[2]),self.symbol_position[1]+perpendicular_d*math.sin(self.symbol_position[2]), -self.symbol_position[2]]
self.moveto_obj.respawn(target, )
counter+=1
if d<self.distance_to_box:
break
time.sleep(1)
#aiming to the box
self.shooting_complete=False
self.is_aiming=False
print("aiming to box")
print("start shooting module")
self.shooting_pub.publish(1)
station=[self.x0, self.y0, -self.symbol_position[2]]
radius=2
duration=30
print(self.symbol_position)
print(station)
while not rospy.is_shutdown():
self.shooting_pub.publish(1)
#duration 0 is forever
if not self.is_aiming:
self.stationkeep_obj.respawn(station, radius, duration)
#make aiming respawn
if self.shooting_complete:
print("shooting done, return to base")
break
time.sleep(1)
def stop_shoot_callback(self, msg):
if msg.data==1:
#stop aiming station
self.shooting_complete=True
def random_walk(self):
""" create random walk points and more favor towards center """
x = random.gauss(np.mean(self.map_dim[0]), 0.25 * np.ptp(self.map_dim[0]))
y = random.gauss(np.mean(self.map_dim[1]), 0.25 * np.ptp(self.map_dim[1]))
return self.map_constrain(x, y)
def map_constrain(self, x, y):
""" constrain x and y within map """
if x > np.max(self.map_dim[0]):
x = np.max(self.map_dim[0])
elif x < np.min(self.map_dim[0]):
x = np.min(self.map_dim[0])
else:
x = x
if y > np.max(self.map_dim[1]):
y = np.max(self.map_dim[1])
elif y < np.min(self.map_dim[1]):
y = np.min(self.map_dim[1])
else:
y = y
return [x, y, 0]
def symbol_callback(self, msg):
if len(msg.markers)>0:
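# Once enough face detections are collected, cluster them with k-means to estimate the station center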
if self.symbols_counter>self.MAX_DATA:
station_kmeans = KMeans(n_clusters=1).fit(self.symbols)
self.station_center=station_kmeans.cluster_centers_
self.station_position[0]=self.station_center[0][0]
self.station_position[1]=self.station_center[0][1]
self.station_seen=True
for i in range(len(msg.markers)):
self.symbols[self.symbols_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.symbols_counter+=1
if msg.markers[i].type==self.symbol[0] and msg.markers[i].id==self.symbol[1]:
#set position_list (not sure)
self.symbol_position[0]=msg.markers[i].pose.position.x
self.symbol_position[1]=msg.markers[i].pose.position.y
x = msg.markers[i].pose.orientation.x
y = msg.markers[i].pose.orientation.y
z = msg.markers[i].pose.orientation.z
w = msg.markers[i].pose.orientation.w
_, _, self.symbol_position[2] = euler_from_quaternion((x, y, z, w))
self.symbol_location[self.shape_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.shape_counter+=1
if self.station_seen and self.shape_counter>self.MAX_DATA:
symbol_kmeans = KMeans(n_clusters=1).fit(self.symbol_location)
self.symbol_center=symbol_kmeans.cluster_centers_
self.symbol_position[0]=self.symbol_center[0][0]
self.symbol_position[1]=self.symbol_center[0][1]
#print(self.symbol_position)
self.symbol_seen=True
#self.pool.apply(cancel_loiter)
def get_tf(self, fixed_frame, base_frame):
""" transform from base_link to map """
trans_received = False
while not trans_received:
try:
(trans, rot) = self.tf_listener.lookupTransform(fixed_frame,
base_frame,
rospy.Time(0))
trans_received = True
return (Point(*trans), Quaternion(*rot))
except (tf.LookupException,
tf.ConnectivityException,
tf.ExtrapolationException):
pass
def odom_callback(self, msg):
trans, rot = self.get_tf("map", "base_link")
self.x0 = trans.x
self.y0 = trans.y
_, _, self.yaw0 = euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))
self.odom_received = True
if __name__ == '__main__':
try:
# [type, id] cruciform red
DetectDeliver([1,0])
except rospy.ROSInterruptException:
rospy.loginfo("Task 7 Finished")
| gpl-3.0 |
nicolaschotard/snspin | snspin/spinmeas.py | 1 | 61529 | #!/usr/bin/env python
"""Class to use spin."""
import sys
import numpy as N
import matplotlib.pyplot as P
from snspin.spin import Craniometer, get_cranio
from snspin.spectrum import merge
class DrGall(object):
"""Class to manipulate and use the craniometer."""
def __init__(self, spec=None, specb=None, specr=None,
spec_merged=None, verbose=True):
"""
Spectrum initialization.
Create self.x, self.y, self.v if spec is the merged spectrum
Create self.xb, self.yb, self.vb for the blue channel if specb is given
Create self.xr, self.yr, self.vr for the red channel if specr is given
"""
self.values_initialization()
self.xb = None
self.xr = None
self.x_merged = None
if spec is not None:
if spec.x[0] < 4000 and spec.x[-1] > 6500:
self.x = N.array(spec.x)
self.y = N.array(spec.y)
self.v = N.array(spec.v)
self.xb = N.array(spec.x)
self.yb = N.array(spec.y)
self.vb = N.array(spec.v)
self.xr = N.array(spec.x)
self.yr = N.array(spec.y)
self.vr = N.array(spec.v)
self.x_merged = N.array(spec.x)
self.y_merged = N.array(spec.y)
self.v_merged = N.array(spec.v)
elif spec.x[0] < 4000 and spec.x[-1] < 6500:
self.xb = N.array(spec.x)
self.yb = N.array(spec.y)
self.vb = N.array(spec.v)
elif spec.x[0] > 4000 and spec.x[-1] > 6500:
self.xr = N.array(spec.x)
self.yr = N.array(spec.y)
self.vr = N.array(spec.v)
if verbose:
print >> sys.stderr, 'INFO: Working on merged spectrum'
elif specb or specr:
if (specb and specb.x[0] > 4000) or (specr and specr.x[0] < 4000):
print >> sys.stderr, 'Error, check if B channel is really B '\
'channel and not r channel'
return
try:
self.xb = N.array(specb.x)
self.yb = N.array(specb.y)
self.vb = N.array(specb.v)
except (ValueError, AttributeError): # also catches the case where specb is None
pass
try:
self.xr = N.array(specr.x)
self.yr = N.array(specr.y)
self.vr = N.array(specr.v)
except (ValueError, AttributeError): # also catches the case where specr is None
pass
if self.xb is not None and self.xr is not None:
try:
spec_merged = merge.MergedSpectrum(specb, specr)
self.x_merged = N.array(spec_merged.x)
self.y_merged = N.array(spec_merged.y)
self.v_merged = N.array(spec_merged.v)
except ValueError:
print >> sys.stderr, 'Merged spectrum failure'
if verbose:
if self.xb is not None and self.xr is not None:
print >> sys.stderr, 'Work on B and r channel'
elif self.xb is not None and self.xr is None:
print >> sys.stderr, 'Work only on B channel'
elif self.xb is None and self.xr is not None:
print >> sys.stderr, 'Work only on r channel'
elif self.xb is None \
and self.xr is None \
and not hasattr(self, 'x'):
print >> sys.stderr, 'Work on merged spectrum'
else:
print >> sys.stderr, 'Error, no correct input in DrGall. '\
'Give me a spectrum (for instance, spec with '\
'spec.x, spec.y and spec.v)'
sys.exit()
else:
print >> sys.stderr, 'Error, no correct input in DrGall. Give me a '\
'spectrum (for instance, spec with spec.x, spec.y and spec.v)'
sys.exit()
def values_initialization(self, verbose=False):
"""Initialize all values."""
values = {}
# Initialize a craniometer with a fake wavelength range
fake_lbd = range(3000, 10000, 2)
cranio = Craniometer(fake_lbd,
N.ones(len(fake_lbd)),
N.ones(len(fake_lbd)))
cranio.init_only = True
# Create values
cranio.rca(verbose=verbose)
cranio.rcas(verbose=verbose)
cranio.rcas2(verbose=verbose)
cranio.rsi(verbose=verbose)
cranio.rsis(verbose=verbose)
cranio.rsiss(verbose=verbose)
cranio.ew(3504, 3687, 3887, 3990, 'caiiHK', verbose=verbose)
cranio.ew(3830, 3963, 4034, 4150, 'siii4000', verbose=verbose)
cranio.ew(4034, 4150, 4452, 4573, 'mgii', verbose=verbose)
cranio.ew(5085, 5250, 5500, 5681, 'SiiW', verbose=verbose)
cranio.ew(5085, 5250, 5250, 5450, 'SiiW_L', verbose=verbose)
cranio.ew(5250, 5450, 5500, 5681, 'SiiW_r', verbose=verbose)
cranio.ew(5550, 5681, 5850, 6015, 'siii5972', verbose=verbose)
cranio.ew(5850, 6015, 6250, 6365, 'siii6355', verbose=verbose)
cranio.ew(7100, 7270, 7720, 8000, 'oi7773', verbose=verbose)
cranio.ew(7720, 8000, 8300, 8800, 'caiiir', verbose=verbose)
cranio.ew(4400, 4650, 5050, 5300, 'fe4800', verbose=verbose)
cranio.velocity({'lmin': 3963,
'lmax': 4034,
'lrest': 4128,
'name': 'vsiii_4128'},
verbose=verbose)
cranio.velocity({'lmin': 5200,
'lmax': 5350,
'lrest': 5454,
'name': 'vsiii_5454'},
verbose=verbose)
cranio.velocity({'lmin': 5351,
'lmax': 5550,
'lrest': 5640,
'name': 'vsiii_5640'},
verbose=verbose)
cranio.velocity({'lmin': 5700,
'lmax': 5900,
'lrest': 5972,
'name': 'vsiii_5972'},
verbose=verbose)
cranio.velocity({'lmin': 6000,
'lmax': 6210,
'lrest': 6355,
'name': 'vsiii_6355'},
verbose=verbose)
# Update values
values.update(cranio.rcavalues)
values.update(cranio.rcasvalues)
values.update(cranio.rcas2values)
values.update(cranio.rsivalues)
values.update(cranio.rsisvalues)
values.update(cranio.rsissvalues)
values.update(cranio.velocityvalues)
values.update(cranio.ewvalues)
self.values = values
def calcium_computing(self, smoother="sgfilter", verbose=False, nsimu=1000):
"""
Function to compute and return all spectral indicators in the calcium zone.
(Blue part of the spectrum, B channel)
"""
# Test if computing is possible
if self.xb is None:
print >> sys.stderr, 'Error, impossible to compute spectral '\
'indicators defined in calcium zone (maybe no B channel)'
indicators = {'edca': [N.nan, N.nan],
'rca': [N.nan, N.nan],
'rcas': [N.nan, N.nan],
'rcas2': [N.nan, N.nan],
'ewcaiiHK': [N.nan, N.nan],
'ewsiii4000': [N.nan, N.nan],
'ewmgii': [N.nan, N.nan]}
return indicators
if verbose:
print >> sys.stderr, '\nINFO: Computing SI on calcium zone for this spectrum'
# Create zone and craniometers
cazone = (self.xb > 3450) & (self.xb < 4070)
sizone = (self.xb > 3850) & (self.xb < 4150)
mgzone = (self.xb > 4000) & (self.xb < 4610)
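        # Boolean wavelength masks selecting the Ca II H&K, Si II ~4000 A and
        # Mg II regions of the blue channel; each region gets its own craniometer.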
self.cranio_bca = get_cranio(self.xb[cazone],
self.yb[cazone],
self.vb[cazone],
smoother=smoother,
verbose=verbose,
nsimu=nsimu)
self.cranio_bsi = get_cranio(self.xb[sizone],
self.yb[sizone],
self.vb[sizone],
smoother=smoother,
verbose=verbose,
nsimu=nsimu)
self.cranio_bmg = get_cranio(self.xb[mgzone],
self.yb[mgzone],
self.vb[mgzone],
smoother=smoother,
verbose=verbose,
nsimu=nsimu)
        try:
            rca = self.cranio_bca.rca(verbose=verbose)
self.values.update(self.cranio_bca.rcavalues)
if verbose:
print 'rca computing done, rca =', rca
except ValueError:
rca = [N.nan, N.nan]
if verbose:
print 'Error in rca computing, rca =', rca
try:
rcas = self.cranio_bca.rcas(verbose=verbose)
self.values.update(self.cranio_bca.rcasvalues)
if verbose:
print 'rcas computing done, rcas =', rcas
except ValueError:
rcas = [N.nan, N.nan]
if verbose:
print 'Error in rcas computing, rcas =', rcas
try:
rcas2 = self.cranio_bca.rcas2(verbose=verbose)
self.values.update(self.cranio_bca.rcas2values)
if verbose:
print 'rcas2 computing done, rcas2 =', rcas2
except ValueError:
rcas2 = [N.nan, N.nan]
if verbose:
print 'Error in rcas2 computing, rcas2 =', rcas2
try:
ewcaiihk = self.cranio_bca.ew(3504, 3687, 3830, 3990, 'caiiHK', sup=True, right1=True,
verbose=verbose)
self.values.update(self.cranio_bca.ewvalues)
if verbose:
print 'ewcaiiHK computing done, ewcaiiHK =', ewcaiihk
except ValueError:
ewcaiihk = [N.nan, N.nan]
if verbose:
print 'Error in ewcaiiHK computing, ewcaiiHK =', ewcaiihk
try:
ewsiii4000 = self.cranio_bsi.ew(3830, 3990, 4030, 4150,
'siii4000',
sup=True,
verbose=verbose)
self.values.update(self.cranio_bsi.ewvalues)
if verbose:
print 'ewsiii4000 computing done, ewsiii4000 =', ewsiii4000
except ValueError:
ewsiii4000 = [N.nan, N.nan]
if verbose:
print 'Error in ewsiii4000 computing ewsiii4000 =', ewsiii4000
try:
ewmgii = self.cranio_bmg.ew(4030, 4150, 4450, 4650,
'mgii',
sup=True,
left2=True,
verbose=verbose)
self.values.update(self.cranio_bmg.ewvalues)
if verbose:
print 'ewmgii computing done, ewmgii = ', ewmgii
except ValueError:
ewmgii = [N.nan, N.nan]
if verbose:
print 'Error in ewmgii computing, ewmgii =', ewmgii
try:
vsiii_4000 = self.cranio_bsi.velocity({'lmin': 3963,
'lmax': 4034,
'lrest': 4128,
'name': 'vsiii_4128'},
verbose=verbose)
self.values.update(self.cranio_bsi.velocityvalues)
if verbose:
print 'vsiii_4128 computing done, vsiii_4000 =', vsiii_4000
except ValueError:
vsiii_4000 = [N.nan, N.nan]
if verbose:
print 'Error in vsiii_4128 computing, vsiii_4000', vsiii_4000
indicators = {'rca': rca,
'rcas2': rcas2,
'ewcaiiHK': ewcaiihk,
'ewsiii4000': ewsiii4000,
'ewmgii': ewmgii,
'vsiii4128': vsiii_4000}
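        # Drop the (potentially large) Monte-Carlo simulation and systematic
        # arrays before returning, presumably to keep the object lightweight.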
del self.cranio_bca.simulations
del self.cranio_bca.syst
del self.cranio_bsi.simulations
del self.cranio_bsi.syst
del self.cranio_bmg.simulations
del self.cranio_bmg.syst
return indicators
def silicon_computing(self, smoother="sgfilter", verbose=False, nsimu=1000):
"""Function to compute and retunr all spectral indicators in the silicon zone."""
# Test if computing is possible
if self.xr is None:
            print >> sys.stderr, 'Error, impossible to compute spectral '\
                'indicators defined in silicon zone (maybe no r channel)'
            indicators = {'rsi': [N.nan, N.nan],
                          'rsis': [N.nan, N.nan],
                          'rsiss': [N.nan, N.nan],
                          'ewSiiW': [N.nan, N.nan],
                          'ewsiii5972': [N.nan, N.nan],
                          'ewsiii6355': [N.nan, N.nan],
                          'vsiii_5972': [N.nan, N.nan],
                          'vsiii_6355': [N.nan, N.nan]}
return indicators
if verbose:
print >> sys.stderr, '\nINFO: Computing SI on silicon zone for this spectrum'
# Create zone and craniometers
zone1 = (self.xr > 5500) & (self.xr < 6400)
zone2 = (self.xr > 5060) & (self.xr < 5700)
zone3 = (self.xr > 5500) & (self.xr < 6050)
zone4 = (self.xr > 5800) & (self.xr < 6400)
zone5 = (self.xr > 5480) & (self.xr < 6500)
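        # Five overlapping wavelength masks on the red channel; each spectral
        # indicator is measured on its own smoothed sub-spectrum (see the
        # trailing comments on the get_cranio calls below).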
self.cranio_r1 = get_cranio(self.xr[zone1],
self.yr[zone1],
self.vr[zone1],
smoother=smoother,
nsimu=nsimu,
verbose=verbose) # rsi, rsis
self.cranio_r2 = get_cranio(self.xr[zone2],
self.yr[zone2],
self.vr[zone2],
smoother=smoother,
nsimu=nsimu,
verbose=verbose) # ewSiiW
self.cranio_r3 = get_cranio(self.xr[zone3],
self.yr[zone3],
self.vr[zone3],
smoother=smoother,
nsimu=nsimu,
verbose=verbose) # ewsiii5972
        self.cranio_r4 = get_cranio(self.xr[zone4],
                                    self.yr[zone4],
                                    self.vr[zone4],
                                    smoother=smoother,
                                    nsimu=nsimu,
                                    verbose=verbose)  # ewsiii6355
self.cranio_r5 = get_cranio(self.xr[zone5],
self.yr[zone5],
self.vr[zone5],
smoother=smoother,
nsimu=nsimu,
verbose=verbose) # rsiss
try:
rsi = self.cranio_r1.rsi(verbose=verbose)
self.values.update(self.cranio_r1.rsivalues)
if verbose:
print 'rsi computing done, rsi =', rsi
except ValueError:
rsi = [N.nan, N.nan]
if verbose:
print 'Error in rsi computing, rsi =', rsi
try:
rsis = self.cranio_r1.rsis(verbose=verbose)
self.values.update(self.cranio_r1.rsisvalues)
if verbose:
print 'rsis computing done, rsis =', rsis
except ValueError:
rsis = [N.nan, N.nan]
if verbose:
print 'Error in rsis computing, rsis =', rsis
try:
rsiss = self.cranio_r5.rsiss(verbose=verbose)
self.values.update(self.cranio_r5.rsissvalues)
if verbose:
print 'rsiss computing done, rsiss =', rsiss
except ValueError:
rsiss = [N.nan, N.nan]
if verbose:
print 'Error in rsiss computing, rsiss =', rsiss
try:
ewsiiw = self.cranio_r2.ew(5050, 5285, 5500, 5681,
'SiiW',
sup=True,
# right1=True,
verbose=verbose)
if verbose:
print 'ewSiiW computing done, ewSiiW =', ewsiiw
except ValueError:
ewsiiw = [N.nan, N.nan]
if verbose:
print 'Error in ewSiiW computing, ewSiiW =', ewsiiw
try:
ewsiiw_L = self.cranio_r2.ew(5085, 5250, 5250, 5450,
'SiiW_L',
sup=True,
right1=True,
verbose=verbose)
if verbose:
print 'ewSiiW_L computing done, ewSiiW_L =', ewsiiw_L
except ValueError:
ewsiiw_L = [N.nan, N.nan]
if verbose:
print 'Error in ewSiiW_L computing, ewSiiW_L =', ewsiiw_L
try:
ewsiiw_r = self.cranio_r2.ew(5250, 5450, 5500, 5681,
'SiiW_r',
sup=True,
verbose=verbose)
if verbose:
print 'ewSiiW_r computing done, ewSiiW_r =', ewsiiw_r
except ValueError:
ewsiiw_r = [N.nan, N.nan]
if verbose:
print 'Error in ewSiiW_r computing, ewSiiW_r =', ewsiiw_r
try:
self.values.update(self.cranio_r2.ewvalues)
except ValueError:
pass
try:
ewsiii5972 = self.cranio_r3.ew(5550, 5681, 5850, 6015,
'siii5972',
sup=True,
right2=True,
verbose=verbose)
self.values.update(self.cranio_r3.ewvalues)
if verbose:
print 'ewsiii5972 computing done, ewsiii5972 =', ewsiii5972
except ValueError:
ewsiii5972 = [N.nan, N.nan]
if verbose:
print 'Error in ewsiii5972 computing, ewsiii5972 =', ewsiii5972
try:
ewsiii6355 = self.cranio_r4.ew(5850, 6015, 6250, 6365,
'siii6355',
right1=True,
sup=True,
verbose=verbose)
self.values.update(self.cranio_r4.ewvalues)
if verbose:
print 'ewsiii6355 computing done, ewsiii6355 =', ewsiii6355
except ValueError:
ewsiii6355 = [N.nan, N.nan]
if verbose:
print 'Error in ewsiii6355 computing, ewsiii6355 =', ewsiii6355
try:
vsiii_5454 = self.cranio_r2.velocity({'lmin': 5200,
'lmax': 5350,
'lrest': 5454,
'name': 'vsiii_5454'},
verbose=verbose)
self.values.update(self.cranio_r2.velocityvalues)
if verbose:
print 'vsiii_5454 computing done, vsiii_5454 =', vsiii_5454
except ValueError:
vsiii_5454 = [N.nan, N.nan]
if verbose:
print 'Error in vsiii_5454 computing, vsiii_5454 =', vsiii_5454
try:
vsiii_5640 = self.cranio_r2.velocity({'lmin': 5351,
'lmax': 5550,
'lrest': 5640,
'name': 'vsiii_5640'},
verbose=verbose)
self.values.update(self.cranio_r2.velocityvalues)
if verbose:
print 'vsiii_5640 computing done, vsiii_5640 =', vsiii_5640
except ValueError:
vsiii_5640 = [N.nan, N.nan]
if verbose:
print 'Error in vsiii_5640 computing, vsiii_5640 =', vsiii_5640
try:
vsiii_5972 = self.cranio_r3.velocity({'lmin': 5700,
'lmax': 5875,
'lrest': 5972,
'name': 'vsiii_5972'},
verbose=verbose)
self.values.update(self.cranio_r3.velocityvalues)
if verbose:
print 'vsiii_5972 computing done, vsiii_5972 =', vsiii_5972
except ValueError:
vsiii_5972 = [N.nan, N.nan]
if verbose:
print 'Error in vsiii_5972 computing, vsiii_5972 =', vsiii_5972
try:
vsiii_6355 = self.cranio_r4.velocity({'lmin': 6000,
'lmax': 6210,
'lrest': 6355,
'name': 'vsiii_6355'},
verbose=verbose)
self.values.update(self.cranio_r4.velocityvalues)
if verbose:
print 'vsiii_6355 computing done, vsiii_6355 =', vsiii_6355
except ValueError:
vsiii_6355 = [N.nan, N.nan]
if verbose:
print 'Error in vsiii_6355 computing, vsiii_6355 =', vsiii_6355
indicators = {'rsi': rsi,
'rsis': rsis,
'rsiss': rsiss,
'ewSiiW': ewsiiw,
'ewsiii5972': ewsiii5972,
'ewsiii6355': ewsiii6355,
'vsiii_5972': vsiii_5972,
'vsiii_6355': vsiii_6355}
del self.cranio_r1.simulations
del self.cranio_r2.simulations
del self.cranio_r3.simulations
del self.cranio_r4.simulations
del self.cranio_r1.syst
del self.cranio_r2.syst
del self.cranio_r3.syst
del self.cranio_r4.syst
return indicators
def oxygen_computing(self, smoother="sgfilter", verbose=True, nsimu=1000):
"""Function to compute and return spectral indicators in the end of the spectrum."""
# Test if the computation will be possible
if self.xr is None:
            print >> sys.stderr, 'Error, impossible to compute spectral '\
                'indicators defined in oxygen zone (maybe no r channel)'
indicators = {'ewoi7773': [N.nan, N.nan],
'ewcaiiir': [N.nan, N.nan]}
return indicators
if verbose:
print >> sys.stderr, '\nINFO: Computing SI on oxygen zone for this spectrum'
# Create zone and craniometers
zone = (self.xr > 6500) & (self.xr < 8800)
self.cranio_O = get_cranio(self.xr[zone],
self.yr[zone],
self.vr[zone],
smoother=smoother,
nsimu=nsimu,
verbose=verbose) # ewoi7773 and caiiir
try:
ewoi7773 = self.cranio_O.ew(7100, 7270, 7720, 8000,
'oi7773',
sup=True,
verbose=verbose)
if verbose:
print 'ewoi7773 computing done, ewoi7773 =', ewoi7773
except ValueError:
ewoi7773 = [N.nan, N.nan]
if verbose:
print 'Error in ewoi7773 computing, ewoi7773 =', ewoi7773
try:
ewcaiiir = self.cranio_O.ew(7720, 8000, 8300, 8800,
'caiiir',
sup=True,
verbose=verbose)
if verbose:
print 'ewcaiiir computing done, ewcaiiir =', ewcaiiir
except ValueError:
ewcaiiir = [N.nan, N.nan]
if verbose:
print 'Error in ewcaiiir computing, ewcaiiir =', ewcaiiir
try:
self.values.update(self.cranio_O.ewvalues)
except ValueError:
pass
indicators = {'ewoi7773': ewoi7773,
'ewcaiiir': ewcaiiir}
del self.cranio_O.simulations
del self.cranio_O.syst
return indicators
def iron_computing(self, smoother="sgfilter", verbose=True, nsimu=1000):
"""Function to compute and return spectral indicators on the iron zone."""
# Test if the computation will be possible
if self.x_merged is None:
            print >> sys.stderr, 'Error, impossible to compute spectral '\
                'indicators defined in iron zone (maybe no r or b channel)'
indicators = {'ewfe4800': [N.nan, N.nan]}
return indicators
if verbose:
print >> sys.stderr, '\nINFO: Computing SI on iron zone for this spectrum'
# Create zone and craniometers
zone = (self.x_merged > 4350) & (self.x_merged < 5350)
self.cranio_fe = get_cranio(self.x_merged[zone],
self.y_merged[zone],
self.v_merged[zone],
smoother=smoother,
nsimu=nsimu,
verbose=verbose) # ewfe4800
try:
ewfe4800 = self.cranio_fe.ew(4450, 4650, 5050, 5285,
'fe4800',
sup=True,
left2=True,
verbose=verbose)
if verbose:
print 'ewfe4800 computing done, ewfe4800 =', ewfe4800
except ValueError:
ewfe4800 = [N.nan, N.nan]
if verbose:
print 'Error in ewfe4800 computing, ewfe4800 =', ewfe4800
try:
self.values.update(self.cranio_fe.ewvalues)
except ValueError:
pass
indicators = {'ewfe4800': ewfe4800}
del self.cranio_fe.simulations
del self.cranio_fe.syst
return indicators
def initialize_parameters(self):
"""Function to initialize parameters use to make the control_plot."""
try:
rsi = self.cranio_r1.rsivalues['rsi']
except ValueError:
rsi = float(N.nan)
try:
rsis = self.cranio_r1.rsisvalues['rsis']
except ValueError:
rsis = float(N.nan)
try:
rsiss = self.cranio_r5.rsissvalues['rsiss']
except ValueError:
rsiss = float(N.nan)
try:
rca = self.cranio_bca.rcavalues['rca']
except ValueError:
rca = float(N.nan)
try:
rcas = self.cranio_bca.rcasvalues['rcas']
except ValueError:
rcas = float(N.nan)
try:
rcas2 = self.cranio_bca.rcas2values['rcas2']
except ValueError:
rcas2 = float(N.nan)
edca = float(N.nan)
try:
ewcaiihk = self.cranio_bca.ewvalues['ewcaiiHK']
except ValueError:
ewcaiihk = float(N.nan)
try:
ewsiii4000 = self.cranio_bsi.ewvalues['ewsiii4000']
except ValueError:
ewsiii4000 = float(N.nan)
try:
ewmgii = self.cranio_bmg.ewvalues['ewmgii']
except ValueError:
ewmgii = float(N.nan)
try:
ewsiiw = self.cranio_r2.ewvalues['ewSiiW']
except ValueError:
ewsiiw = float(N.nan)
try:
ewsiiw_L = self.cranio_r2.ewvalues['ewSiiW_L']
except ValueError:
ewsiiw_L = float(N.nan)
try:
ewsiiw_r = self.cranio_r2.ewvalues['ewSiiW_r']
except ValueError:
ewsiiw_r = float(N.nan)
try:
ewsiii5972 = self.cranio_r3.ewvalues['ewsiii5972']
except ValueError:
ewsiii5972 = float(N.nan)
try:
ewsiii6355 = self.cranio_r4.ewvalues['ewsiii6355']
except ValueError:
ewsiii6355 = float(N.nan)
try:
vsiii_5972 = self.cranio_r3.velocityvalues['vsiii_5972']
except ValueError:
vsiii_5972 = float(N.nan)
try:
vsiii_6355 = self.cranio_r4.velocityvalues['vsiii_6355']
except ValueError:
vsiii_6355 = float(N.nan)
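        # Note: the positional order of the returned tuple matters; the plot_*
        # methods below index it directly (e.g. metrics[0] is rsi, metrics[3] is rca).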
return rsi, rsis, rsiss, rca, rcas, rcas2, edca, ewcaiihk, ewsiii4000, ewmgii, \
ewsiiw, ewsiii5972, ewsiii6355, vsiii_5972, vsiii_6355, ewsiiw_L, \
ewsiiw_r
# =========================================================================
# Functions to plot control_plot of spectral indicators computing
# =========================================================================
def plot_craniobca(self, metrics, ax=None, filename=''):
"""Plot zone where rca, rcas, rcas2, edca and ewcaiiHK are computed."""
rca, rcas, rcas2, ewcaiihk = metrics[3], metrics[4], metrics[5], metrics[7]
cr = self.cranio_bca
if ax is None:
fig = P.figure()
ax = fig.add_subplot(111)
save = True
else:
save = False
ax.plot(cr.x, cr.y, color='k', label='Flux')
try:
ax.plot(cr.x, cr.s, color='r', label='Interpolated flux')
except ValueError:
print >> sys.stderr, "No smothing function computed, so no '\
'smoothing function ploted"
try: # Plot the rcas vspan
ax.axvspan(cr.rcasvalues['rcas_lbd'][0],
cr.rcasvalues['rcas_lbd'][1],
ymin=0, ymax=1, facecolor='y', alpha=0.25)
ax.axvspan(cr.rcasvalues['rcas_lbd'][2],
cr.rcasvalues['rcas_lbd'][3],
ymin=0, ymax=1, facecolor='y', alpha=0.25)
except ValueError:
print >> sys.stderr, "No parameters to plot rcas zone"
try: # Plot the ewcaiiHK points and lines
lbd_line = cr.x[(cr.x >= cr.ewvalues['lbd_ewcaiiHK'][0])
& (cr.x <= cr.ewvalues['lbd_ewcaiiHK'][1])]
p_line = N.polyfit([cr.ewvalues['lbd_ewcaiiHK'][0],
cr.ewvalues['lbd_ewcaiiHK'][1]],
[cr.smoother(cr.ewvalues['lbd_ewcaiiHK'])[0],
cr.smoother(cr.ewvalues['lbd_ewcaiiHK'])[1]], 1)
ax.scatter(cr.rcavalues['rca_lbd'],
cr.smoother(cr.rcavalues['rca_lbd']),
s=40, c='g', marker='o', edgecolors='none',
label='_nolegend_')
ax.plot(lbd_line, N.polyval(p_line, lbd_line), color='g')
except ValueError:
print >> sys.stderr, "No parameters to plot ewcaiiHK zone"
try: # Plot the rca lines
for x, y in zip(cr.rcavalues['rca_lbd'],
cr.smoother(cr.rcavalues['rca_lbd'])):
ax.vlines(x, 0, y, color='g', linewidth=1, label='_nolegend_')
except ValueError:
print >> sys.stderr, "No parameters to plot rca zone"
# Annotate the ca zone with spectral indicators values
try:
ax.annotate('rca=%.2f, rcas=%.2f, rcas2=%.2f' %
(rca, rcas, rcas2), xy=(0.01, 0.01),
xycoords='axes fraction',
xytext=(0.01, 0.01), textcoords='axes fraction',
horizontalalignment='left',
verticalalignment='bottom', fontsize=10)
ax.annotate('ewcaiiHK=%.2f' %
(ewcaiihk), xy=(0.01, 0.95), xycoords='axes fraction',
xytext=(0.01, 0.95), textcoords='axes fraction',
horizontalalignment='left',
verticalalignment='bottom', fontsize=10)
except ValueError:
pass
ax.set_ylim(ymin=0)
ax.set_xlim(xmin=3450, xmax=4070)
if save:
fig.savefig('calcium_' + filename)
def plot_craniobsi(self, metrics, ax=None, filename=''):
"""Plot zone where ewsi4000 is computed."""
ewsiii4000 = metrics[8]
cr = self.cranio_bsi
if ax is None:
fig = P.figure()
ax = fig.add_subplot(111)
save = True
else:
save = False
ax.plot(cr.x, cr.y, color='k', label='Flux')
try:
ax.plot(cr.x, cr.s, color='r', label='Interpolated flux')
except ValueError:
print >> sys.stderr, "No smothing function computed, so no '\
'smoothing function ploted"
try: # Plot points and straight lines
lbd_line = cr.x[(cr.x >= cr.ewvalues['lbd_ewsiii4000'][0])
& (cr.x <= cr.ewvalues['lbd_ewsiii4000'][1])]
p_line = N.polyfit([cr.ewvalues['lbd_ewsiii4000'][0],
cr.ewvalues['lbd_ewsiii4000'][1]],
[cr.smoother(cr.ewvalues['lbd_ewsiii4000'])[0],
cr.smoother(cr.ewvalues['lbd_ewsiii4000'])[1]],
1)
ax.scatter(cr.ewvalues['lbd_ewsiii4000'],
cr.smoother(cr.ewvalues['lbd_ewsiii4000']),
s=40, c='g', marker='o', edgecolors='none',
label='_nolegend_')
ax.plot(lbd_line, N.polyval(p_line, lbd_line), color='g')
except ValueError:
print >> sys.stderr, "No parameters to plot edca straight line"
try: # Plot vlines
for x, y in zip(cr.ewvalues['lbd_ewsiii4000'],
cr.smoother(cr.ewvalues['lbd_ewsiii4000'])):
ax.vlines(x, 0, y, color='g', linewidth=1, label='_nolegend_')
except ValueError:
print >> sys.stderr, "No parameters to plot rca vlines"
# Annotate the ca zone with spectral indicators values
try:
ax.annotate('ewsiii4000=%.2f' %
(ewsiii4000), xy=(0.01, 0.01), xycoords='axes fraction',
xytext=(0.01, 0.01), textcoords='axes fraction',
horizontalalignment='left',
verticalalignment='bottom', fontsize=10)
except ValueError:
pass
ax.set_ylim(ymin=0)
ax.set_xlim(xmin=3850, xmax=4150)
if save:
fig.savefig('ewsiii4000_' + filename)
def plot_craniobmg(self, metrics, ax=None, filename=''):
"""Plot zone where ewmgii is computed."""
ewmgii = metrics[9]
cr = self.cranio_bmg
if ax is None:
fig = P.figure()
ax = fig.add_subplot(111)
save = True
else:
save = False
ax.plot(cr.x, cr.y, color='k', label='Flux')
try:
ax.plot(cr.x, cr.s, color='r', label='Interpolated flux')
except ValueError:
print >> sys.stderr, "No smothing function computed, so no '\
'smoothing function ploted"
try: # Plot points and straight lines
lbd_line = cr.x[(cr.x >= cr.ewvalues['lbd_ewmgii'][0])
& (cr.x <= cr.ewvalues['lbd_ewmgii'][1])]
p_line = N.polyfit([cr.ewvalues['lbd_ewmgii'][0],
cr.ewvalues['lbd_ewmgii'][1]],
[cr.smoother(cr.ewvalues['lbd_ewmgii'])[0],
cr.smoother(cr.ewvalues['lbd_ewmgii'])[1]], 1)
ax.scatter(cr.ewvalues['lbd_ewmgii'],
cr.smoother(cr.ewvalues['lbd_ewmgii']),
s=40, c='g', marker='o', edgecolors='none',
label='_nolegend_')
ax.plot(lbd_line, N.polyval(p_line, lbd_line), color='g')
except ValueError:
print >> sys.stderr, "No parameters to plot ewmgii straight '\
'line zone"
try: # Plot vlines
for x, y in zip(cr.ewvalues['lbd_ewmgii'],
cr.smoother(cr.ewvalues['lbd_ewmgii'])):
ax.vlines(x, 0, y, color='g', linewidth=1, label='_nolegend_')
except ValueError:
print >> sys.stderr, "No parameters to plot ewmgii vlines"
# Annotate the ca zone with spectral indicators values
try:
ax.annotate('ewmgii=%.2f' %
(ewmgii), xy=(0.01, 0.01), xycoords='axes fraction',
xytext=(0.01, 0.01), textcoords='axes fraction',
horizontalalignment='left',
verticalalignment='bottom', fontsize=10)
except ValueError:
pass
ax.set_ylim(ymin=0)
ax.set_xlim(xmin=4000, xmax=4600)
if save:
fig.savefig('ewmgii_' + filename)
def plot_cranior1r5(self, metrics, ax=None, filename=''):
"""Plot zone where rsi, rsis, rsiss are computed."""
rsi, rsis, rsiss = metrics[0], metrics[1], metrics[2]
cr1 = self.cranio_r1
cr5 = self.cranio_r5
if ax is None:
fig = P.figure()
ax = fig.add_subplot(111)
save = True
else:
save = False
ax.plot(cr5.x, cr5.y, color='k', label='Flux')
try:
ax.plot(cr1.x, cr1.s, color='r', label='Interpolated flux')
except ValueError:
print >> sys.stderr, "No smothing function computed, so no '\
'smoothing function ploted"
# try: #Plot the rsiss vspan
ax.axvspan(cr5.rsissvalues['rsiss_lbd'][0],
cr5.rsissvalues['rsiss_lbd'][1],
ymin=0, ymax=1, facecolor='y', alpha=0.25)
ax.axvspan(cr5.rsissvalues['rsiss_lbd'][2],
cr5.rsissvalues['rsiss_lbd'][3],
ymin=0, ymax=1, facecolor='y', alpha=0.25)
# except ValueError: print >> sys.stderr, "No parameters to plot rsiss
# zone"
if N.isfinite(cr1.rsivalues['rsi']):
# Plot the rsi points and lines
lbd_line1 = cr1.x[(cr1.x >= cr1.rsivalues['rsi_lbd'][0])
& (cr1.x <= cr1.rsivalues['rsi_lbd'][2])]
lbd_line2 = cr1.x[(cr1.x >= cr1.rsivalues['rsi_lbd'][2])
& (cr1.x <= cr1.rsivalues['rsi_lbd'][4])]
p_line1 = N.polyfit([cr1.rsivalues['rsi_lbd'][0],
cr1.rsivalues['rsi_lbd'][2]],
[cr1.smoother(cr1.rsivalues['rsi_lbd'])[0],
cr1.smoother(cr1.rsivalues['rsi_lbd'])[2]], 1)
p_line2 = N.polyfit([cr1.rsivalues['rsi_lbd'][2],
cr1.rsivalues['rsi_lbd'][4]],
[cr1.smoother(cr1.rsivalues['rsi_lbd'])[2],
cr1.smoother(cr1.rsivalues['rsi_lbd'])[4]], 1)
ax.scatter(cr1.rsivalues['rsi_lbd'],
cr1.smoother(cr1.rsivalues['rsi_lbd']),
s=40, c='g', marker='o', edgecolors='none',
label='_nolegend_')
ax.plot(lbd_line1, N.polyval(p_line1, lbd_line1), color='g')
ax.plot(lbd_line2, N.polyval(p_line2, lbd_line2), color='g')
for x, y in zip(cr1.rsivalues['rsi_lbd'], # Plot the rsi and rsis lines
cr1.smoother(cr1.rsivalues['rsi_lbd'])):
ax.vlines(x, 0, y, color='g', linewidth=1, label='_nolegend_')
else:
print >> sys.stderr, "No parameters to plot rsi zone"
# Annotate the ca zone with spectral indicators values
try:
ax.annotate('rsi=%.2f, rsis=%.2f, rsiss=%.2f' %
(rsi, rsis, rsiss), xy=(0.01, 0.01),
xycoords='axes fraction',
xytext=(0.01, 0.01), textcoords='axes fraction',
horizontalalignment='left',
verticalalignment='bottom', fontsize=10)
except ValueError:
pass
ax.set_ylim(ymin=0)
ax.set_xlim(xmin=5480, xmax=6500)
if save:
fig.savefig('silicon_' + filename)
def plot_cranior2(self, metrics, ax=None, filename=''):
"""Plot zone where ewsiiw is computed."""
ewsiiw, ewsiiw_L, ewsiiw_r = metrics[10], metrics[15], metrics[16]
cr = self.cranio_r2
if ax is None:
fig = P.figure()
ax = fig.add_subplot(111)
save = True
else:
save = False
ax.plot(cr.x, cr.y, color='k', label='Flux')
try:
ax.plot(cr.x, cr.s, color='r', label='Interpolated flux')
except ValueError:
print >> sys.stderr, "No smothing function computed, so no '\
'smoothing function ploted"
# For ewsiW
try: # Plot points and straight lines
lbd_line = cr.x[(cr.x >= cr.ewvalues['lbd_ewSiiW'][0])
& (cr.x <= cr.ewvalues['lbd_ewSiiW'][1])]
p_line = N.polyfit([cr.ewvalues['lbd_ewSiiW'][0],
cr.ewvalues['lbd_ewSiiW'][1]],
[cr.smoother(cr.ewvalues['lbd_ewSiiW'])[0],
cr.smoother(cr.ewvalues['lbd_ewSiiW'])[1]], 1)
ax.scatter(cr.ewvalues['lbd_ewSiiW'],
cr.smoother(cr.ewvalues['lbd_ewSiiW']),
s=40, c='g', marker='o', edgecolors='none',
label='_nolegend_')
ax.plot(lbd_line, N.polyval(p_line, lbd_line), color='g')
except ValueError:
print >> sys.stderr, "No parameters to plot ewSiiW straight '\
'line zone"
try: # Plot vlines
for x, y in zip(cr.ewvalues['lbd_ewSiiW'],
cr.smoother(cr.ewvalues['lbd_ewSiiW'])):
ax.vlines(x, 0, y, color='g', linewidth=1, label='_nolegend_')
except ValueError:
print >> sys.stderr, "No parameters to plot ewSiiW vlines"
# For ewsiW_L
try: # Plot points and straight lines
lbd_line = cr.x[(cr.x >= cr.ewvalues['lbd_ewSiiW_L'][0])
& (cr.x <= cr.ewvalues['lbd_ewSiiW_L'][1])]
p_line = N.polyfit([cr.ewvalues['lbd_ewSiiW_L'][0],
cr.ewvalues['lbd_ewSiiW_L'][1]],
[cr.smoother(cr.ewvalues['lbd_ewSiiW_L'])[0],
cr.smoother(cr.ewvalues['lbd_ewSiiW_L'])[1]], 1)
ax.scatter(cr.ewvalues['lbd_ewSiiW_L'],
cr.smoother(cr.ewvalues['lbd_ewSiiW_L']),
s=40, c='g', marker='o', edgecolors='none',
label='_nolegend_')
ax.plot(lbd_line, N.polyval(p_line, lbd_line), color='g')
except ValueError:
print >> sys.stderr, "No parameters to plot ewSiiW_L straight '\
'line zone"
try: # Plot vlines
for x, y in zip(cr.ewvalues['lbd_ewSiiW_L'],
cr.smoother(cr.ewvalues['lbd_ewSiiW_L'])):
ax.vlines(x, 0, y, color='g', linewidth=1, label='_nolegend_')
except ValueError:
print >> sys.stderr, "No parameters to plot ewSiiW_L vlines"
# For ewsiW_r
try: # Plot points and straight lines
lbd_line = cr.x[(cr.x >= cr.ewvalues['lbd_ewSiiW_r'][0])
& (cr.x <= cr.ewvalues['lbd_ewSiiW_r'][1])]
p_line = N.polyfit([cr.ewvalues['lbd_ewSiiW_r'][0],
cr.ewvalues['lbd_ewSiiW_r'][1]],
[cr.smoother(cr.ewvalues['lbd_ewSiiW_r'])[0],
cr.smoother(cr.ewvalues['lbd_ewSiiW_r'])[1]], 1)
ax.scatter(cr.ewvalues['lbd_ewSiiW_r'],
cr.smoother(cr.ewvalues['lbd_ewSiiW_r']),
s=40, c='g', marker='o', edgecolors='none',
label='_nolegend_')
ax.plot(lbd_line, N.polyval(p_line, lbd_line), color='g')
except ValueError:
print >> sys.stderr, "No parameters to plot ewSiiW_r straight '\
'line zone"
try: # Plot vlines
for x, y in zip(cr.ewvalues['lbd_ewSiiW_r'],
cr.smoother(cr.ewvalues['lbd_ewSiiW_r'])):
ax.vlines(x, 0, y, color='g', linewidth=1, label='_nolegend_')
except ValueError:
print >> sys.stderr, "No parameters to plot ewSiiW_r vlines"
# Annotate the ca zone with spectral indicators values
try:
ax.annotate('ewSiiW=%.2f' %
(ewsiiw), xy=(0.01, 0.07), xycoords='axes fraction',
xytext=(0.01, 0.07), textcoords='axes fraction',
horizontalalignment='left',
verticalalignment='bottom', fontsize=10)
ax.annotate('ewSiiW_L=%.2f, ewSiiW_r=%.2f' %
(ewsiiw_L, ewsiiw_r), xy=(0.01, 0.01),
xycoords='axes fraction',
xytext=(0.01, 0.01), textcoords='axes fraction',
horizontalalignment='left',
verticalalignment='bottom', fontsize=10)
except ValueError:
pass
ax.set_ylim(ymin=0)
ax.set_xlim(xmin=5060, xmax=5700)
if save:
fig.savefig('ewSiiW_' + filename)
def plot_cranior3r4(self, metrics, ax=None, filename=''):
"""Plot zone where ewSiiW is computed."""
ewsiii5972, ewsiii6355 = metrics[11], metrics[12]
cr3 = self.cranio_r3
cr4 = self.cranio_r4
if ax is None:
fig = P.figure()
ax = fig.add_subplot(111)
save = True
else:
save = False
ax.plot(cr3.x, cr3.y, color='k', label='Flux')
ax.plot(cr4.x, cr4.y, color='k', label='Flux')
try:
ax.plot(cr3.x, cr3.s, color='r', label='Interpolated flux')
except ValueError:
print >> sys.stderr, "No smothing function computed for '\
'ewsiii5972, so no smoothing function ploted"
try:
ax.plot(cr4.x, cr4.s, color='b', label='Interpolated flux')
except ValueError:
print >> sys.stderr, "No smothing function computed for '\
'ewsiii6355, so no smoothing function ploted"
try: # Plot points and straight lines
lbd_line = cr3.x[(cr3.x >= cr3.ewvalues['lbd_ewsiii5972'][0]) &
(cr3.x <= cr3.ewvalues['lbd_ewsiii5972'][1])]
p_line = N.polyfit([cr3.ewvalues['lbd_ewsiii5972'][0],
cr3.ewvalues['lbd_ewsiii5972'][1]],
[cr3.smoother(cr3.ewvalues['lbd_ewsiii5972'])[0],
cr3.smoother(cr3.ewvalues['lbd_ewsiii5972'])[1]], 1)
ax.scatter(cr3.ewvalues['lbd_ewsiii5972'],
cr3.smoother(cr3.ewvalues['lbd_ewsiii5972']),
s=40, c='g', marker='o', edgecolors='none',
label='_nolegend_')
ax.plot(lbd_line, N.polyval(p_line, lbd_line), color='g')
except ValueError:
print >> sys.stderr, "No parameters to plot ewsiii5972 straight '\
'line zone"
try: # Plot points and straight lines
lbd_line = cr4.x[(cr4.x >= cr4.ewvalues['lbd_ewsiii6355'][0]) &
(cr4.x <= cr4.ewvalues['lbd_ewsiii6355'][1])]
p_line = N.polyfit([cr4.ewvalues['lbd_ewsiii6355'][0],
cr4.ewvalues['lbd_ewsiii6355'][1]],
[cr4.smoother(cr4.ewvalues['lbd_ewsiii6355'])[0],
cr4.smoother(cr4.ewvalues['lbd_ewsiii6355'])[1]], 1)
ax.scatter(cr4.ewvalues['lbd_ewsiii6355'],
cr4.smoother(cr4.ewvalues['lbd_ewsiii6355']),
s=40, c='g', marker='o', edgecolors='none',
label='_nolegend_')
ax.plot(lbd_line, N.polyval(p_line, lbd_line), color='g')
except ValueError:
print >> sys.stderr, "No parameters to plot ewsiii6355 straight '\
'line zone"
try: # Plot vlines for ewsiii5972
for x, y in zip(cr3.ewvalues['lbd_ewsiii5972'],
cr3.smoother(cr3.ewvalues['lbd_ewsiii5972'])):
ax.vlines(x, 0, y, color='g', linewidth=1, label='_nolegend_')
except ValueError:
print >> sys.stderr, "No parameters to plot ewsiii5972 vlines"
try: # Plot vlines for ewsiii6355
for x, y in zip(cr4.ewvalues['lbd_ewsiii6355'],
cr4.smoother(cr4.ewvalues['lbd_ewsiii6355'])):
ax.vlines(x, 0, y, color='g', linewidth=1, label='_nolegend_')
except ValueError:
print >> sys.stderr, "No parameters to plot ewsiii6355 vlines"
# Annotate the si zone with spectral indicators values
try:
ax.annotate('ewsiii5972=%.2f, ewsiii6355=%.2f' %
(ewsiii5972, ewsiii6355),
xy=(0.01, 0.01), xycoords='axes fraction',
xytext=(0.01, 0.01), textcoords='axes fraction',
horizontalalignment='left', verticalalignment='bottom',
fontsize=10)
except ValueError:
pass
try: # Plot vline for vsiii_6355
ax.axvline(cr4.velocityvalues['vsiii_6355_lbd'],
color='k', lw=1, label='_nolegend_')
except ValueError:
print >> sys.stderr, "No parameters to plot siii6355 vlines"
ax.set_ylim(ymin=0)
ax.set_xlim(xmin=5500, xmax=6400)
if save:
fig.savefig('ewsiii5972_' + filename)
def plot_spectrum(self, metrics, ax=None, title=None):
"""Plot the spectrum."""
if ax is None:
fig = P.figure()
ax = fig.add_subplot(111)
else:
ax = ax
# Plot spectrum===============================================
ax.plot(self.x, self.y, color='k')
ax.set_xlabel('Wavelength [AA]')
ax.set_ylabel('Flux [erg/s/cm2]')
if title is not None:
ax.set_title('%s' % title)
def control_plot(self, filename='', title=None, oformat='png'):
"""
Make a general control plot.
Options:
filename: (string) filename of the png created. Should end in .png
title: (string) optional title of the control plot. Passing SN
name and exp_code through it is a good idea.
"""
# Initialize parameters=====================================
metrics = self.initialize_parameters()
if self.xb is not None and self.xr is not None:
fig = P.figure(figsize=(14, 12))
ax1 = fig.add_subplot(3, 3, 1)
ax2 = fig.add_subplot(3, 3, 2)
ax3 = fig.add_subplot(3, 3, 3)
ax4 = fig.add_subplot(3, 3, 4)
ax5 = fig.add_subplot(3, 3, 5)
ax6 = fig.add_subplot(3, 3, 6)
ax7 = fig.add_subplot(3, 1, 3)
self.plot_craniobca(metrics, ax=ax1, filename=filename)
self.plot_craniobsi(metrics, ax=ax2, filename=filename)
self.plot_craniobmg(metrics, ax=ax3, filename=filename)
self.plot_cranior1r5(metrics, ax=ax4, filename=filename)
self.plot_cranior2(metrics, ax=ax5, filename=filename)
self.plot_cranior3r4(metrics, ax=ax6, filename=filename)
self.plot_spectrum(metrics, ax=ax7, title=title)
ax7.set_ylim(ymin=0)
ax7.set_xlim(xmin=3000, xmax=7000)
if filename is None:
filename = "control_plot"
fig.savefig(filename + '.' + oformat)
print >> sys.stderr, "Control plot saved in %s" % filename + '.' + oformat
elif self.xb is not None:
print >> sys.stderr, 'Worked on the b channel only'
fig = P.figure(figsize=(12, 8))
ax1 = fig.add_subplot(2, 3, 1)
ax2 = fig.add_subplot(2, 3, 2)
ax3 = fig.add_subplot(2, 3, 3)
ax7 = fig.add_subplot(2, 1, 2)
self.plot_craniobca(metrics, ax=ax1, filename=filename)
self.plot_craniobsi(metrics, ax=ax2, filename=filename)
self.plot_craniobmg(metrics, ax=ax3, filename=filename)
self.plot_spectrum(metrics, ax=ax7, title=title)
ax7.set_ylim(ymin=0)
ax7.set_xlim(xmin=self.xb[0], xmax=self.xb[-1])
if filename is None:
filename = "control_plot"
if title is not None:
ax7.set_title('%s, calcium zone' % title)
else:
ax7.set_title('calcium zone')
fig.savefig(filename + '.' + oformat)
print >> sys.stderr, "Control plot saved in %s" % filename + '.' + oformat
elif self.xr is not None:
print >> sys.stderr, 'Worked on the r channel only'
fig = P.figure(figsize=(12, 8))
ax4 = fig.add_subplot(2, 3, 1)
ax5 = fig.add_subplot(2, 3, 2)
ax6 = fig.add_subplot(2, 3, 3)
ax7 = fig.add_subplot(2, 1, 2)
self.plot_cranior1r5(metrics, ax=ax4, filename=filename)
self.plot_cranior2(metrics, ax=ax5, filename=filename)
self.plot_cranior3r4(metrics, ax=ax6, filename=filename)
self.plot_spectrum(metrics, ax=ax7, title=title)
ax7.set_ylim(ymin=0)
ax7.set_xlim(xmin=self.xr[0], xmax=7000)
if filename is None:
filename = "control_plot"
if title is not None:
ax7.set_title('%s, silicon zone' % title)
else:
ax7.set_title('silicon zone')
fig.savefig(filename + '.' + oformat)
print >> sys.stderr, "Control plot saved in %s" % filename + '.' + oformat
P.close()
def plot_oxygen(self, filename='', title=None, oformat='png'):
"""Plot oxygen zone."""
cr = self.cranio_O
fig = P.figure()
ax = fig.add_subplot(111)
ax.plot(cr.x, cr.y, 'k', label='Flux')
ax.plot(cr.x, cr.s, 'r', label='Interpolated flux')
try: # Plot points and straight lines
lbd_line = cr.x[(cr.x >= cr.ewvalues['lbd_ewoi7773'][0])
& (cr.x <= cr.ewvalues['lbd_ewoi7773'][1])]
p_line = N.polyfit([cr.ewvalues['lbd_ewoi7773'][0],
cr.ewvalues['lbd_ewoi7773'][1]],
[cr.smoother(cr.ewvalues['lbd_ewoi7773'])[0],
cr.smoother(cr.ewvalues['lbd_ewoi7773'])[1]], 1)
ax.scatter(cr.ewvalues['lbd_ewoi7773'],
cr.smoother(cr.ewvalues['lbd_ewoi7773']),
s=40, c='g', marker='o', edgecolors='none',
label='_nolegend_')
ax.plot(lbd_line, N.polyval(p_line, lbd_line), color='g')
except ValueError:
print >> sys.stderr, "No parameters to plot ewoi7773 straight line"
try: # Plot vlines
for x, y in zip(cr.ewvalues['lbd_ewoi7773'],
cr.smoother(cr.ewvalues['lbd_ewoi7773'])):
ax.vlines(x, 0, y, color='g', linewidth=1, label='_nolegend_')
except ValueError:
print >> sys.stderr, "No parameters to plot ewoi7773 vlines\n"
try: # Plot points and straight lines
lbd_line = cr.x[(cr.x >= cr.ewvalues['lbd_ewcaiiir'][0])
& (cr.x <= cr.ewvalues['lbd_ewcaiiir'][1])]
p_line = N.polyfit([cr.ewvalues['lbd_ewcaiiir'][0],
cr.ewvalues['lbd_ewcaiiir'][1]],
[cr.smoother(cr.ewvalues['lbd_ewcaiiir'])[0],
cr.smoother(cr.ewvalues['lbd_ewcaiiir'])[1]], 1)
ax.scatter(cr.ewvalues['lbd_ewcaiiir'],
cr.smoother(cr.ewvalues['lbd_ewcaiiir']),
s=40, c='g', marker='o', edgecolors='none',
label='_nolegend_')
ax.plot(lbd_line, N.polyval(p_line, lbd_line), color='g')
except ValueError:
print >> sys.stderr, "No parameters to plot ewcaiiir straight line"
try: # Plot vlines
for x, y in zip(cr.ewvalues['lbd_ewcaiiir'],
cr.smoother(cr.ewvalues['lbd_ewcaiiir'])):
ax.vlines(x, 0, y, color='g', linewidth=1, label='_nolegend_')
except ValueError:
print >> sys.stderr, "No parameters to plot ewcaiiir vlines\n"
# Try to Annotate with spectral indicators values
try:
ax.annotate('ewoi7773=%.2f, ewcaiiir=%.2f' %
(cr.ewvalues['ewoi7773'],
cr.ewvalues['ewcaiiir']),
xy=(0.01, 0.01), xycoords='axes fraction',
xytext=(0.01, 0.01), textcoords='axes fraction',
horizontalalignment='left', verticalalignment='bottom',
fontsize=10)
except ValueError:
pass
ax.set_ylim(ymin=0)
ax.set_xlim(xmin=cr.x[0], xmax=cr.x[-1])
ax.set_xlabel('Wavelength [AA]')
ax.set_ylabel('Flux [erg/s/cm2]')
if title is not None:
ax.set_title(title)
fig.savefig(filename + '.' + oformat)
print >> sys.stderr, "Control plot for oxygen zone saved in %s" \
% filename + '.' + oformat
P.close()
def plot_iron(self, filename='', title=None, oformat='png'):
"""Plot iron zone."""
cr = self.cranio_fe
fig = P.figure()
ax = fig.add_subplot(111)
ax.plot(cr.x, cr.y, 'k', label='Flux')
ax.plot(cr.x, cr.s, 'r', label='Interpolated flux')
try: # Plot points and straight lines
lbd_line = cr.x[(cr.x >= cr.ewvalues['lbd_ewfe4800'][0])
& (cr.x <= cr.ewvalues['lbd_ewfe4800'][1])]
p_line = N.polyfit([cr.ewvalues['lbd_ewfe4800'][0],
cr.ewvalues['lbd_ewfe4800'][1]],
[cr.smoother(cr.ewvalues['lbd_ewfe4800'])[0],
cr.smoother(cr.ewvalues['lbd_ewfe4800'])[1]], 1)
ax.scatter(cr.ewvalues['lbd_ewfe4800'],
cr.smoother(cr.ewvalues['lbd_ewfe4800']),
s=40, c='g', marker='o', edgecolors='none',
label='_nolegend_')
ax.plot(lbd_line, N.polyval(p_line, lbd_line), color='g')
except ValueError:
print >> sys.stderr, "No parameters to plot ewfe4800 straight line"
try: # Plot vlines
for x, y in zip(cr.ewvalues['lbd_ewfe4800'],
cr.smoother(cr.ewvalues['lbd_ewfe4800'])):
ax.vlines(x, 0, y, color='g', linewidth=1, label='_nolegend_')
except ValueError:
print >> sys.stderr, "No parameters to plot ewfe4800 vlines\n"
# Try to Annotate with spectral indicators values
try:
ax.annotate('ewfe4800=%.2f' % (cr.ewvalues['ewfe4800']),
xy=(0.01, 0.01), xycoords='axes fraction',
xytext=(0.01, 0.01), textcoords='axes fraction',
horizontalalignment='left', verticalalignment='bottom',
fontsize=10)
except ValueError:
pass
ax.set_ylim(ymin=0)
ax.set_xlim(xmin=cr.x[0], xmax=cr.x[-1])
ax.set_xlabel('Wavelength [AA]')
ax.set_ylabel('Flux [erg/s/cm2]')
if title is not None:
ax.set_title(title)
fig.savefig(filename + '.' + oformat)
print >> sys.stderr, "Control plot for iron zone saved in %s" %\
filename + '.' + oformat
P.close()
| mit |
pompiduskus/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
SHDShim/pytheos | examples/6_p_scale_test_Dorogokupets2007_Au.py | 1 | 1404 |
# coding: utf-8
# In[1]:
get_ipython().run_line_magic('cat', '0Source_Citation.txt')
# In[2]:
get_ipython().run_line_magic('matplotlib', 'inline')
# %matplotlib notebook # for interactive
# For high dpi displays.
# In[3]:
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
# # 0. General note
# This example compares pressure calculated from `pytheos` and original publication for the gold scale by Dorogokupets 2007.
# # 1. Global setup
# In[4]:
import matplotlib.pyplot as plt
import numpy as np
from uncertainties import unumpy as unp
import pytheos as eos
# # 3. Compare
# In[5]:
eta = np.linspace(1., 0.65, 8)
print(eta)
# In[6]:
dorogokupets2007_au = eos.gold.Dorogokupets2007()
# In[7]:
help(dorogokupets2007_au)
# In[8]:
dorogokupets2007_au.print_equations()
# In[9]:
dorogokupets2007_au.print_equations()
# In[10]:
dorogokupets2007_au.print_parameters()
# In[11]:
v0 = 67.84742110765599
# In[12]:
dorogokupets2007_au.three_r
# In[13]:
v = v0 * (eta)
temp = 2500.
# In[14]:
p = dorogokupets2007_au.cal_p(v, temp * np.ones_like(v))
# <img src='./tables/Dorogokupets2007_Au.png'>
# In[15]:
print('for T = ', temp)
for eta_i, p_i in zip(eta, p):
print("{0: .3f} {1: .2f} ".format(eta_i, p_i))
# In[16]:
v = dorogokupets2007_au.cal_v(p, temp * np.ones_like(p), min_strain=0.6)
print(1.-(v/v0))
| apache-2.0 |
sosl/LOFAR-processing | scheduling/observePulsars.py | 1 | 38161 | #!/usr/bin/env python
# coding=utf-8
"""
Python module / application to create a pulsar observation schedule with good visibility
Jörn Künsemöller
"""
import errno
import argparse
import dateutil.parser
import astropysics.obstools as obstools
import astropysics.coords as coords
import matplotlib
matplotlib.use('Agg')
import datetime
import os
import re
from matplotlib.dates import HOURLY, DateFormatter, rrulewrapper, RRuleLocator
from pylab import *
import subprocess
import time
# Default Settings:
defaultObservationCmd = "~/PSR_8bit_Scripts/Observe_DE601_C34.py"
defaultOutputPath = os.getcwd()+"/schedule.txt"
defaultAllowIdle = False
defaultIdlePenalty = 2
defaultInitialRetries = 20
defaultTimePenalty = 1
defaultDropPenalty = 500
defaultLocation='DE609'
defaultLogPath= os.getcwd()
defaultObserverSetupTime = 2
defaultInputPath = None
# Config:
maxObsDays = 4 # in case no limit is specified
sites = dict(DE609=obstools.Site(lat=coords.AngularCoordinate('53d42m0s'),
long=coords.AngularCoordinate('9d58m50s'),
name="DE609"),
DE601=obstools.Site(lat=coords.AngularCoordinate('50d31m0s'),
long=coords.AngularCoordinate('6d53m0s'),
name="DE601"),
DE602=obstools.Site(lat=coords.AngularCoordinate('48d30m4s'),
long=coords.AngularCoordinate('11d17m13s'),
name="DE602"),
DE603=obstools.Site(lat=coords.AngularCoordinate('50d59m0s'),
long=coords.AngularCoordinate('11d43m0s'),
name="DE603"),
DE605=obstools.Site(lat=coords.AngularCoordinate('50d55m0s'),
long=coords.AngularCoordinate('6d21m0s'),
name="DE605"),
SE607=obstools.Site(lat=coords.AngularCoordinate('57d23m54s'),
long=coords.AngularCoordinate('11d55m50s'),
name="SE607")
)
colorNormalizeBadness = 300.0
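# colorNormalizeBadness scales an observation's time badness into the red
# colour channel of its bar in the Gantt chart (see plotschedule below);
# a badness of this value or more is drawn fully red.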
verbose = 1
# Tuning config: Faster processing vs. quality:
minshift = datetime.timedelta(seconds=60) # stop fair shifting for small overlaps and flatten out schedule
#initialRetries = defaultInitialRetries # Minimize penalty over so many initial spreadings on observation days (random sequence order)
# Stefan: added an option for initialRetries and a default value
# Create directories with tolerance for existence, exception handling...
def safeMakedirs(directory, permissions):
try:
directory = os.path.normpath(os.path.expanduser(directory))
if verbose > 0:
print "...creating directory", directory
os.makedirs(directory,permissions)
if verbose > 0:
print "...done."
except OSError as err:
if err.errno == errno.EEXIST and os.path.isdir(directory):
if verbose > 0:
print "...done. (Already there.)"
pass
else:
if verbose > 0:
print "! ERROR--> Could not create directory: ", err.message
exit(2)
# Determine total overlap in given schedule (in minutes)
def determineoverlap(schedule):
schedule = sorted(schedule, key=lambda x: x[3])
overlap = 0
tmpschedule = list(schedule)
for (pulsar, start, stop, optimalUTC, priority) in schedule:
tmpschedule.remove((pulsar, start, stop, optimalUTC, priority))
for(comparepulsar, comparestart, comparestop, compareoptimalUTC, comparepriority) in tmpschedule:
overlapping = min(stop, comparestop) - max(start, comparestart)
if overlapping.days >= 0: # we have overlap
overlap += overlapping.days * 24 * 60 + overlapping.seconds / 60
return overlap
# Estimate required total shift to solve all overlaps in given schedule (in minutes)
# Note that this only returns a lower limit! (when several observations overlap, each
# shift might impact the following shifts, which is not considered here...)
def estimateshift(schedule):
schedule = sorted(schedule, key=lambda x: x[3])
shift = 0
tmpschedule = list(schedule)
for (pulsar, start, stop, optimalUTC, priority) in schedule:
tmpschedule.remove((pulsar, start, stop, optimalUTC, priority))
for (comparepulsar, comparestart, comparestop, compareoptimalUTC, comparepriority) in tmpschedule:
thisshift = min((stop - comparestart), (comparestop - start))
if thisshift.days >= 0: # we have to shift
shift += thisshift.days * 24 * 60 + thisshift.seconds / 60
return shift
# time badness of single observation
def determinetimebadness(start, stop, optimalUTC, timePenalty=defaultTimePenalty):
delta = (optimalUTC - start) + (optimalUTC - stop)
if delta.days < 0:
delta = -delta
    badness = (delta.days * 24 * 60 + delta.seconds / 60) * timePenalty
return badness
# total time badness of alls observation in a schedule
def determinetotaltimebadness(schedule, timePenalty=defaultTimePenalty):
badness = 0
for (pulsar, start, stop, optimalUTC, priority) in schedule:
badness += determinetimebadness(start, stop, optimalUTC)
return badness
# display gantt chart of a schedule
def plotschedule(begin, end, deadline, schedule, title='Schedule',
timePenalty=defaultTimePenalty, blocking=True, display=False):
fig = plt.figure()
fig.set_size_inches(18,13.5)
ax = fig.add_axes([0.2,0.2,0.75,0.7]) #[left,bottom,width,height]
ax.set_title(title)
ax.axvline(x=begin, color="green")
if end is not None:
ax.axvline(x=end, color="blue")
if deadline is not None:
ax.axvline(x=deadline, color="red")
# Plot the data
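    # Each observation gets two bars: a light grey one centred on its optimal
    # transit time and one at its scheduled time whose red shade grows with
    # the time badness.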
for (pulsar,start,stop, optimalUTC, priority) in reversed(schedule):
start_date = matplotlib.dates.date2num(start)
stop_date = matplotlib.dates.date2num(stop)
duration = stop_date - start_date
optimal_start = matplotlib.dates.date2num(optimalUTC) - 0.5*duration
ax.axhline(y=schedule.index((pulsar,start,stop, optimalUTC, priority)), color=(0.8, 0.8, 0.8), zorder=0)
ax.barh((schedule.index((pulsar,start,stop, optimalUTC, priority))), duration, left=optimal_start, height=0.5,align='center',label=pulsar, color=(0.9, 0.9, 0.9), edgecolor = "none")
ax.barh((schedule.index((pulsar,start,stop, optimalUTC, priority))), duration, left=start_date, height=0.6, align='center',label=pulsar, color=(min(determinetimebadness(start, stop, optimalUTC, timePenalty), int(colorNormalizeBadness))/colorNormalizeBadness, 0.0, 0.0))
# Format y-axis
pos = range(len(schedule))
locsy, labelsy = yticks(pos, zip(*schedule)[0])
plt.setp(labelsy,size='medium')
# Format x-axis
ax.axis('tight')
ax.xaxis_date()
rule = rrulewrapper(HOURLY, interval=3)
loc = RRuleLocator(rule)
formatter = DateFormatter("%d %h - %H h")
ax.xaxis.set_major_locator(loc)
ax.xaxis.set_major_formatter(formatter)
labelsx = ax.get_xticklabels()
plt.setp(labelsx, rotation=30, fontsize=10)
if begin is None:
begin = datetime.datetime.now()
if deadline is not None:
plt.xlim(begin - datetime.timedelta(hours=1) ,deadline + datetime.timedelta(hours=1))
elif end is not None:
plt.xlim(begin - datetime.timedelta(hours=1) ,end + datetime.timedelta(hours=1))
else:
plt.xlim(begin)
fig.autofmt_xdate()
plt.savefig('gantt.svg')
plt.show(block=blocking)
# shifts observations in the schedule to get rid of any overlap. Some observations may be dropped in the process.
# Note: This is not very efficient!
def solveschedule(begin, end, deadline, schedule, timePenalty=defaultTimePenalty, dropPenalty=defaultDropPenalty):
if verbose > 1:
print "...Solving overlap in schedule"
# sort by optimalUTC
schedule = sorted(schedule, key=lambda x: x[3])
# solve overlap:
shifting = True # observations were shifted in last run
dropping = True # observations were dropped in last run
count = 0
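    # Keep iterating until a pass makes no more changes: overlaps are resolved
    # by shifting neighbours apart (split according to their priorities), and
    # whenever an observation is dropped the pass restarts from the fresh
    # optimal-time schedule.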
while shifting or dropping:
if verbose > 2:
print " ...Next interation"
if dropping:
tmpschedule = list(schedule) # start again with fresh schedule (where observations have optimal timing)
dropping = False
shifting = False
fixed = []
for (pulsar, start, stop, optimalUTC, priority) in tmpschedule:
if verbose > 2:
print " ...Now handling observation of ",pulsar
i = tmpschedule.index((pulsar, start, stop, optimalUTC, priority))
if determinetimebadness(start, stop, optimalUTC, timePenalty) > dropPenalty * int(priority):
if verbose > 2:
print " ...Dropping",pulsar,"due to high time badness",determinetimebadness(start, stop, optimalUTC, timePenalty), dropPenalty
schedule.pop(i)
dropping = True
break
if i == len(tmpschedule)-1:
break
(nextpulsar, nextstart, nextstop, nextoptimalUTC, nextpriority) = tmpschedule[i+1]
deltanext = stop - nextstart
if deltanext.days >= 0 and deltanext.seconds > 0:
shifting = True
if verbose > 3:
print " ...Conflicting with next observation by ", deltanext
#deltaformer = start - begin
# if deltanext/2 > deltaformer:
# if verbose > 3:
# print " ...Not enough space for early start. Shifting backwards by", deltaformer," and pushing",nextpulsar," observation by", deltanext - deltaformer
# shift = (begin - start)
# start = start + shift
# stop = stop + shift
# nextstart = nextstart + (deltanext - shift)
# nextstop = nextstop + (deltanext - shift)
# fixed.append((pulsar, start, stop, optimalUTC, priority))
# fixed.append((nextpulsar, nextstart, nextstop, nextoptimalUTC, nextpriority))
# el
if (pulsar, start, stop, optimalUTC, priority) in fixed:
if verbose > 3:
print " ...",pulsar,"is fixed. Shifting",nextpulsar,"by", deltanext
dropped = False
for (tmppulsar, tmpstart, tmpstop, tmpUTC, tmppriority) in fixed:
if (nextstop + deltanext) > tmpstart and (nextstop + deltanext) < tmpstop:
if verbose > 3:
print " ...cannot shift", nextpulsar, "since the new time is blocked by", tmppulsar
                        tmpschedule.remove((nextpulsar, nextstart, nextstop, nextoptimalUTC, nextpriority))
dropped = True
break
if not dropped:
nextstart += deltanext
nextstop += deltanext
fixed.append((nextpulsar, nextstart, nextstop, nextoptimalUTC, nextpriority))
if verbose > 3:
print " ...", nextpulsar,"was fixed!"
elif (nextpulsar, nextstart, nextstop, nextoptimalUTC, nextpriority) in fixed:
if verbose > 3:
print " ...",nextpulsar,"is fixed. Shifting",pulsar,"by", deltanext
dropped = False
for (tmppulsar, tmpstart, tmpstop, tmpUTC, tmppriority) in fixed:
if (nextstop + deltanext) > tmpstart and (nextstop + deltanext) < tmpstop:
if verbose > 3:
print " ...cannot shift", pulsar, "since the new time is blocked by", tmppulsar
                        tmpschedule.remove((pulsar, start, stop, optimalUTC, priority))
dropped = True
break
if not dropped:
start -= deltanext
stop -= deltanext
fixed.append((pulsar, start, stop, optimalUTC, priority))
if verbose > 3:
print " ...", pulsar,"was fixed!"
elif deltanext < minshift:
if verbose > 3:
print " ...Minimum for shift splitting is not reached. Shifting",nextpulsar,"by", deltanext
nextstart += deltanext
nextstop += deltanext
else:
shift = (deltanext / (int(nextpriority) + int(priority))) * int(nextpriority)
nextshift = (deltanext / (int(nextpriority) + int(priority))) * int(priority)
if verbose > 3:
print " ...Shifting",pulsar,"by", shift,"and",nextpulsar,"by", nextshift, " --> ",priority," vs. ", nextpriority
start -= shift
stop -= shift
nextstart += nextshift
nextstop += nextshift
tmpschedule[i] = (pulsar,start,stop, optimalUTC, priority)
tmpschedule[i+1] = (nextpulsar, nextstart, nextstop, nextoptimalUTC, nextpriority)
count += 1
if count%10000 == 0:
print " ...action counter:", count
# plotschedule(begin, end, deadline, tmpschedule, "count: "+str(count)+" - "+str(determineoverlap(tmpschedule)),blocking=True)
# drop all observation that exceed the deadline
tmpschedule = sorted(tmpschedule, key=lambda x: x[3])
toremove = []
for (pulsar, start, stop, optimalUTC, priority) in tmpschedule:
if deadline is not None and stop > deadline:
if verbose > 1:
print " ...dropping observation of", pulsar,"because of strict deadline overhang. (Maybe your schedule is too crowded?)"
toremove.append((pulsar, start, stop, optimalUTC, priority))
if start < begin:
if verbose > 1:
print " ...dropping observation of", pulsar,"because of too early start. (Maybe your schedule is too crowded?)"
toremove.append((pulsar, start, stop, optimalUTC, priority))
# remove separately to avoid interference with first loop
for (pulsar, start, stop, optimalUTC, priority) in toremove:
tmpschedule.remove((pulsar, start, stop, optimalUTC, priority))
return tmpschedule
# turns a list of planned observations into a schedule within the specified time constraints
def makeSchedule(observationList, site, begin, end, deadline, timePenalty,
idlePenalty, dropPenalty, initialRetries):
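    # observationList entries are (pulsar_name, duration_in_minutes, priority)
    # tuples; pulsar names must follow the usual J/B hhmm convention so that
    # the right ascension can be parsed further down.
    # Hypothetical usage sketch (names and values are made up):
    #   obs = [("J0332+5434", 30, 1), ("B1919+21", 20, 2)]
    #   sched = makeSchedule(obs, sites["DE609"], datetime.datetime.utcnow(),
    #                        None, None, defaultTimePenalty, defaultIdlePenalty,
    #                        defaultDropPenalty, defaultInitialRetries)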
if verbose > 0:
print "Creating schedule for",len(observationList),"observations"
print "...site is", site.name+ ", lat:",site.latitude,"long:",site.longitude
schedule = []
# time constraints:
softTimeframe = None
strictTimeframe = None
if begin is None:
begin = datetime.datetime.now()
if verbose > 0:
print "! Begin time is set to NOW."
if verbose > 0:
print "...begin UTC:",begin
if end is not None:
softTimeframe = end-begin
if verbose > 0:
print "...end UTC:", end
print "...soft timeframe duration:", softTimeframe
if deadline is not None:
strictTimeframe = deadline-begin
if verbose > 0:
print "...deadline UTC:", deadline
print "...strict timeframe duration:", strictTimeframe
if end is None and deadline is None and verbose > 0:
print "...no time limits specified"
# create initial schedule with optimal timing (but overlap):
for (pulsar, dur, priority) in observationList:
duration = int(dur)
lststring = re.split("[JB+-]", pulsar)[1]
if verbose > 2:
print "...shall observe pulsar",pulsar,"for", duration,"minutes..."
optimalLST = begin.replace(hour=int(lststring[:2]), minute=int(lststring[2:]), second=0, microsecond=0)
optimalUTC = site.localTime(lsts=optimalLST.hour+optimalLST.minute/60.0, date=optimalLST.date() , returntype='datetime', utc=True)
if verbose > 2:
print " ...optimal visibility at LST:",optimalLST
optimalUTC = optimalUTC.replace(tzinfo=None)
if verbose > 2:
print " ...optimal visibility at UTC:",optimalUTC
start = optimalUTC - datetime.timedelta(seconds=30*duration)
stop = optimalUTC + datetime.timedelta(seconds=30*duration)
if verbose > 2:
print " ...optimal UTC observation time start:", start,"stop:",stop
job = (pulsar, start, stop, optimalUTC, priority)
schedule.append(job)
# sort initial schedule by optimal UTC
schedule = sorted(schedule, key=lambda x: x[3])
if verbose > 2:
# print schedule
print "...These are the optimal observation times in sequential order:"
print "... ---------------------"
for (pulsar, start, stop, optimalUTC, priority) in schedule:
print "... pulsar:",pulsar,"\tfrom:", start," to:", stop," badness:", (optimalUTC - start) + (optimalUTC - stop)
print "... ---------------------"
bestschedule = None
# for several random sequence orders:
# for each observation in schedule:
# check observation time limits and move to day with lowest overlap
orig_schedule = list(schedule)
best_i = None
for i in range(initialRetries):
if verbose > 2:
print "...Run", i,"..."
schedule = list(orig_schedule)
shuffle(schedule)
for (pulsar, start, stop, optimalUTC, priority) in schedule:
if verbose > 2:
print "...",pulsar
obsdays = maxObsDays
if deadline is not None:
obsdays = (deadline.date() - begin.date()).days + 1
elif end is not None:
obsdays = (end.date() - begin.date()).days + 1
# create base schedule (copy schedule, drop observation in focus)
dropschedule = list(schedule)
dropschedule.remove((pulsar, start, stop, optimalUTC, priority))
dropbadness = estimateshift(dropschedule) + dropPenalty * int(priority)
# Badness for each day of the observation time
# Schedule for each day
badnesses = [0]*obsdays
newschedules = [list(dropschedule) for _ in range(obsdays)]
# check for time limits and determine badness for each observation day
shiftbadness = 0
for day in range(obsdays):
#print " ...considering scheduling",pulsar,"on day ", day
                deltadeadline = None  # remains None unless a strict-deadline shift is computed for this day
                deltabegin = begin - (start + datetime.timedelta(days=day))
if deltabegin > datetime.timedelta(0):
if verbose > 2:
print " ..."+pulsar+" starts too early on day",day," -- necessary shift:", deltabegin
badness = deltabegin.seconds / 60.0 * timePenalty
badnesses[day] += badness
elif deadline is not None:
deltadeadline = deadline - (stop + datetime.timedelta(days=day))
if deltadeadline < datetime.timedelta(0):
if verbose > 2:
print " ..."+pulsar+" stops too late for strict limit on day",day," -- necessary shift:", deltadeadline
                        badness = (-deltadeadline.days * 24 * 60 - deltadeadline.seconds / 60.0) * timePenalty
badnesses[day] += badness
if end is not None:
deltaend = end - (stop + datetime.timedelta(days=day))
if deltaend < datetime.timedelta(0):
                        if deltadeadline is not None and deltadeadline < datetime.timedelta(0):
                            deltaend -= deltadeadline # no penalty for the shift already performed to meet the deadline
                        badness = (-deltaend.days * 24 * 60 - deltaend.seconds / 60.0) * timePenalty
badnesses[day] += badness
if verbose > 2:
print " ..."+pulsar+" stops too late for soft limit on day",day," -- overtime:", deltaend, optimalUTC
# new schedule where the observation is moved to the the day in focus:
if deltabegin > datetime.timedelta(0):
newschedules[day].append((pulsar, start + datetime.timedelta(days=day) + deltabegin, stop + datetime.timedelta(days=day) + deltabegin, optimalUTC+datetime.timedelta(days=day), priority))
elif deadline is not None and deltadeadline < datetime.timedelta(0):
newschedules[day].append((pulsar, start + datetime.timedelta(days=day) + deltadeadline, stop + datetime.timedelta(days=day) + deltadeadline, optimalUTC+datetime.timedelta(days=day),priority))
else:
newschedules[day].append((pulsar, start + datetime.timedelta(days=day), stop + datetime.timedelta(days=day), optimalUTC+datetime.timedelta(days=day),priority))
# Reset for next day
badness = 0
# time penalty for the day: shifting of observations results in time penalty
badnesses[day] += estimateshift(newschedules[day]) * timePenalty
# Determine best day for the observation (spreads observations amongst observation days):
if verbose > 2:
print " ..."+pulsar,"-- dropbadness:",dropbadness,"-- day badnesses:", badnesses
bestday = badnesses.index(min(badnesses))
            if badnesses[bestday] > dropbadness:
# drop pulsar
if verbose > 2:
print " ...dropping ", pulsar, dropbadness,"<",badnesses[bestday]
schedule = dropschedule
else:
# shift on start day or move to 'bestday'
if verbose > 2:
print " ...scheduling for observation day", bestday
schedule = newschedules[bestday]
# Create simple sequential schedule as initial solution:
if bestschedule is None:
schedule = sorted(schedule, key=lambda x: x[1])
bestschedule = []
laststop = None
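            # Pack the observations back-to-back: the first starts at 'begin', every later one is
            # shifted so that it starts exactly when the previous one stops.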
for j in range(len(schedule)):
(pulsar, start, stop, optimalUTC, priority) = schedule[j]
if j == 0:
dur = stop - start
start = begin
stop = start + dur
elif laststop is not None:
delta = start - laststop
start -= delta
stop -= delta
laststop = stop
bestschedule.append((pulsar, start, stop, optimalUTC, priority))
if verbose > 0:
print "...Sequential base schedule -->",determinetotaltimebadness(bestschedule, timePenalty)
plotschedule(begin, end, deadline, bestschedule,'Base schedule',timePenalty, True, True )
# randomly drop observations as long as this improves the total result
bestsolvedschedule = solveschedule(begin, end, deadline, list(schedule), timePenalty, dropPenalty)
dropping = True
shuffle(schedule)
while dropping:
dropping = False
penalty = 0
for (pulsar, start, stop, optimalUTC, priority) in schedule:
if verbose > 1:
print "...Trying to improve result by dropping", pulsar
dropschedule = list(schedule)
dropschedule.remove((pulsar, start, stop, optimalUTC, priority))
penalty += dropPenalty * int(priority)
dropschedule = solveschedule(begin, end, deadline, dropschedule, timePenalty, dropPenalty)
if determinetotaltimebadness(bestsolvedschedule, timePenalty) > determinetotaltimebadness(dropschedule, timePenalty) + penalty:
if verbose > 1:
print "...Run",i,"- Dropping",pulsar,"improved result from", determinetotaltimebadness(bestsolvedschedule, timePenalty),"to",determinetotaltimebadness(dropschedule, timePenalty),"+",penalty
bestsolvedschedule = dropschedule
dropping = True
break
schedule = bestsolvedschedule
# Is this result better then the best so far?
if verbose > 0:
print "...Run",i,"-->",determinetotaltimebadness(schedule, timePenalty),'+', dropPenalty * (len(orig_schedule) - len(schedule))
if determineoverlap(bestschedule) > 0 or determinetotaltimebadness(schedule, timePenalty) < determinetotaltimebadness(bestschedule, timePenalty) + dropPenalty * (len(schedule) - len(bestschedule)):
bestschedule = schedule
best_i = i
if verbose > 0:
if best_i is not None:
print "...Found best schedule in",i+1,"runs. (i = "+str(best_i)+")"
else:
print "...Base schedule was already the best."
bestschedule = sorted(bestschedule, key=lambda x: x[1])
return bestschedule
# Extends observations to fill up idle times:
def fillIdleTimes(begin, end, deadline, schedule):
schedule = sorted(schedule, key=lambda x: x[1])
if verbose > 0:
print "...Now extending scheduled observations to fill idle time..."
for i in range(len(schedule)-1):
(pulsar, start, stop, optimalUTC, priority) = schedule[i]
(nextpulsar, nextstart, nextstop, nextoptimalUTC, nextpriority) = schedule[i+1]
deltanext = nextstart - stop
# remove old observations
schedule.remove((pulsar, start, stop, optimalUTC, priority))
schedule.remove((nextpulsar, nextstart, nextstop, nextoptimalUTC, nextpriority))
if deltanext > datetime.timedelta(seconds=2):
if verbose > 0:
print " ...extending observations of",pulsar,"and",nextpulsar,"by",deltanext/2
# modify times:
stop += deltanext/2
nextstart -= deltanext/2
if i == 0:
if begin is None:
begin = datetime.datetime.now() - datetime.timedelta(minutes = 2)
if verbose > 0:
print " ...extending observation of",pulsar,"by",start-begin,"to start from beginning of observation time"
start = begin
if i == len(schedule): # Note: It's two short at this point since the currently modified obs were removed above!
if end is not None:
if verbose > 0:
print " ...extending observation of",nextpulsar,"by",end-stop,"to stop at the end of observation time"
nextstop = end
elif deadline is not None:
if verbose > 0:
print " ...extending observation of",nextpulsar,"by",deadline-stop,"to stop at the deadline"
nextstop = deadline
# save modified observations to schedule
schedule.append((pulsar, start, stop, optimalUTC, priority))
schedule.append((nextpulsar, nextstart, nextstop, nextoptimalUTC, nextpriority))
schedule = sorted(schedule, key=lambda x: x[1])
return schedule
# Print schedule to stdout
def printSchedule(schedule, dropObservationList, timePenalty, dropPenalty, msg):
print msg
print "---------------------"
for (pulsar, start, stop, optimalUTC, priority) in schedule:
print "pulsar:",pulsar,"\tfrom:", start," to:", stop," badness:", determinetimebadness(start,stop,optimalUTC, timePenalty)
print "---------------------"
dropped = 0
for(pulsar, dur, priority) in dropObservationList:
print "Dropped:", pulsar, dur, priority
dropped += 1 * int(priority)
print "---------------------"
print "Overlap control value (should be zero):",determineoverlap(schedule)
print "Total time badness:", determinetotaltimebadness(schedule, timePenalty)
print "Total overall badness:", determinetotaltimebadness(schedule, timePenalty) + dropPenalty * dropped
print "---------------------"
# Call observer scripts for each item in the schedule:
def observe(schedule, observationCmd):
observerSetupTime = defaultObserverSetupTime
schedule = sorted(schedule, key=lambda x: x[1])
for (pulsar, start, stop, optimalUTC, priority) in schedule:
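        # Sleep until one minute before the scheduled start of this observation.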
while datetime.datetime.now() < start - datetime.timedelta(minutes = 1):
if verbose > 0:
print " ...gotta wait! It's", datetime.datetime.now(),"but next observation starts at", start
delta = start - datetime.datetime.now()
time.sleep(delta.seconds)
if verbose > 0:
print " ...It's",datetime.datetime.now()," - Starting observation of pulsar", pulsar ,"("+str(datetime.datetime.now()-start)," off)"
cmd = observationCmd+" -p " +pulsar +" -T "+str((stop-start).seconds / 60 - observerSetupTime)
if verbose > 1:
print " ...calling",cmd
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
print process.communicate()[0]
# Main application
#def main(argv):
def main():
observationList = []
observationCmd = defaultObservationCmd
keepIdle = defaultAllowIdle
timePenalty = defaultTimePenalty
idlePenalty = defaultIdlePenalty
initialRetries = defaultInitialRetries
dropPenalty = defaultDropPenalty
outputPath=defaultOutputPath
inputPath=defaultInputPath
location=defaultLocation
logPath=defaultLogPath
global verbose
begin = None
end = None
deadline = None
#parser = argparse.ArgumentParser(description="This code will create a"
#" schedule by randomly shuffling the input list of pulsars to find the"
#" optimal observing time")
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output-file', dest='outputPath', help="Output file")
parser.add_argument('-b', '--begin-date', nargs=2, dest='begin', help="Start date, format mm.dd.yy hh:mm")
parser.add_argument('-e', '--end-date', nargs=2, dest='end', help="End date, format mm.dd.yy hh:mm")
parser.add_argument('-d', '--deadline-date', nargs=2, dest='deadline', help="Strict end date, format mm.dd.yy hh:mm")
parser.add_argument('-s', '--site', dest='location', help="Telescope")
parser.add_argument('-k', '--keep-idle-times', dest='keepIdle', help="Keep idle times(?)")
parser.add_argument('-I', '--iterations', dest='initialRetries', type=int, help="Number of iterations")
parser.add_argument('-T', '--penalty-non-optimal', dest='timePenalty', help="Penalty for non-optimal observing time")
parser.add_argument('-D', '--penalty-drop-pulsar', dest='dropPenalty', type=int, help="Penalty for dropping a pulsar from the schedule", default=500)
parser.add_argument('-c', '--observing-command', dest='observationCmd', help="Custom observing command")
parser.add_argument('-i', '--input-schedule', dest='inputPath', help="Input schedule")
parser.add_argument('-v', '--verbosity', dest='verbose', type=int)
parser.add_argument('-l', '--log-file', dest='logPath', help="Logfile")
parser.add_argument('-O', '--observe', dest='observe', action ='store_true',
help="Run the created schedule immediately. This changes the format of the schedule file!")
parser.add_argument('inputList', metavar='INPUT_FILE', nargs=1, help="Input file with a list of pulsars")
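    # Example call (hypothetical file names), using the options defined above:
    #   observePulsars.py -b 03.15.13 20:00 -d 03.17.13 08:00 -I 50 pulsarList.txt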
args = parser.parse_args()
if args.outputPath :
outputPath = args.outputPath
if args.inputPath :
inputPath = args.inputPath
if args.initialRetries :
initialRetries = args.initialRetries
if args.begin :
begin = dateutil.parser.parse(args.begin[0] + " " + args.begin[1])
if args.end :
end = dateutil.parser.parse(args.end[0] + " " + args.end[1])
if args.deadline :
deadline = dateutil.parser.parse(args.deadline[0] + " " + args.deadline[1])
if args.location :
location = args.location
if args.keepIdle :
keepIdle = args.keepIdle
if args.timePenalty :
timePenalty = args.timePenalty
if args.dropPenalty :
dropPenalty = args.dropPenalty
if args.observationCmd :
observationCmd = args.observationCmd
if args.verbose :
verbose = args.verbose
if args.logPath :
logPath = args.logPath
inputList = args.inputList[0]
# copy stdout to log file
log = os.path.expanduser(logPath + os.path.sep +"observePulsars."+ str(datetime.datetime.now()) +".log")
safeMakedirs(logPath, 0755)
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
tee = subprocess.Popen(["tee", log], stdin=subprocess.PIPE)
os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
os.dup2(tee.stdin.fileno(), sys.stderr.fileno())
if verbose > 0:
print "================="
print "observePulsars.py"
print "================="
if verbose > 0:
print 'Reading desired observations from', inputList,"...",
with open(inputList) as f:
lines = f.readlines()
for line in lines:
if not line.startswith('#'):
observationList.append(line.strip().split())
if verbose > 0:
print "done."
if inputPath != None:
schedule = []
if verbose > 0:
print 'Reading schedule from', inputPath,"...",
with open(inputPath) as f:
lines = f.readlines()
for line in lines:
if not line.startswith('#'):
(pulsar, start, stop, optimalUTC, priority) = line.strip().split("\t")
schedule.append((pulsar, dateutil.parser.parse(start), dateutil.parser.parse(stop), dateutil.parser.parse(optimalUTC), int(priority)))
if verbose > 0:
print "done."
if len(observationList) > 0:
if verbose>0:
print "Creating schedule..."
site = sites.get(location)
schedule = makeSchedule(observationList,site, begin, end,
deadline,timePenalty,idlePenalty,dropPenalty,initialRetries)
# Determine dropped observations
dropObservationList = list(observationList)
for (pulsar, start, stop, optimalUTC, priority) in schedule:
dur = (stop-start).seconds / 60
for line in dropObservationList:
(pulsarL, durL, priorityL) = line
if pulsarL == pulsar and str(dur) == durL and priority == priorityL:
dropObservationList.remove(line)
break
# print intermediate schedule
if verbose > 1:
printSchedule(schedule, dropObservationList, timePenalty, dropPenalty,
"This is the best schedule (with idle times):")
if verbose > 1 and not keepIdle:
plotschedule(begin, end, deadline, schedule, "Best Schedule (with idle"
" times)", True, True )
if not keepIdle:
if verbose > 0:
print "Extending observations to fill up idle time..."
schedule = fillIdleTimes(begin, end, deadline, schedule)
# print final schedule
if verbose > 0:
printSchedule(schedule, dropObservationList, timePenalty, dropPenalty, "This is the final schedule:")
plotschedule(begin, end, deadline, schedule, "Final Schedule", True,
True )
# write schedule to file
with open(outputPath, 'w') as f:
if verbose > 0:
print "Writing schedule to", outputPath
#f.write("# Pulsar \t Duration \t Start UTC \t Stop UTC \t Optimal UTC \t Priority\t Start LST \t Stop LST\n")
for (pulsar, start, stop, optimalUTC, priority) in schedule:
#3 lines inserted by STEFAN
start_date = matplotlib.dates.date2num(start)
stop_date = matplotlib.dates.date2num(stop)
duration = stop_date - start_date
# '"\t"+str(duration)+' as well as LST inserted by STEFAN
if args.observe:
line = pulsar+"\t"+str(duration)+"\t"+str(start)+"\t"+str(stop)+"\t"+str(optimalUTC)+"\t"+str(priority)+"\t"+site.localSiderialTime(start, returntype="string")+"\t"+site.localSiderialTime(stop, returntype="string")
else:
line = '# ' + pulsar + ' LST: ' + site.localSiderialTime(start, returntype="string")[0:5]
line += " UTC: " + str(start)[0:16] + "\n"
line += pulsar + " " + str(int(duration*24*60+0.5)-2) + " "
line += str(start)[0:16]
f.write(line+"\n")
# start observation
if args.observe:
if verbose > 0:
print "Start observing..."
observe(schedule, os.path.expanduser(observationCmd))
else:
if verbose > 0:
print "A script in a list format was written to " + outputPath
if __name__ == '__main__':
#main(sys.argv[1:])
main()
| gpl-2.0 |
inoue0124/TensorFlow_Keras | chapter4/leaky_relu_tensorflow.py | 1 | 2758 | import numpy as np
import tensorflow as tf
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
np.random.seed(0)
tf.set_random_seed(123)
'''
Generate data
'''
mnist = datasets.fetch_mldata('MNIST original', data_home='.')
n = len(mnist.data)
N = 10000 # use a part of the mnist
train_size = 0.8
indices = np.random.permutation(range(n))[:N] # choose random indices up to N
print (indices)
X = mnist.data[indices]
y = mnist.target[indices]
Y = np.eye(10)[y.astype(int)] # convert to 1-of-K
X_train, X_test, Y_train, Y_test =\
train_test_split(X, Y, train_size=train_size)
'''
Set the model
'''
n_in = len(X[0]) # 784
n_hidden = 200
n_out = len(Y[0]) # 10
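# Leaky ReLU: identity for positive inputs, a small slope (alpha) for negative inputs.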
def lrelu(x, alpha=0.01):
return tf.maximum(alpha*x,x)
x = tf.placeholder(tf.float32, shape=[None, n_in])
t = tf.placeholder(tf.float32, shape=[None, n_out])
W0 = tf.Variable(tf.truncated_normal([n_in, n_hidden], stddev=0.01))
b0 = tf.Variable(tf.zeros([n_hidden]))
h0 = lrelu(tf.matmul(x,W0) + b0)
W1 = tf.Variable(tf.truncated_normal([n_hidden, n_hidden], stddev=0.01))
b1 = tf.Variable(tf.zeros([n_hidden]))
h1 = lrelu(tf.matmul(h0,W1) + b1)
W2 = tf.Variable(tf.truncated_normal([n_hidden, n_hidden], stddev=0.01))
b2 = tf.Variable(tf.zeros([n_hidden]))
h2 = lrelu(tf.matmul(h1,W2) + b2)
W3 = tf.Variable(tf.truncated_normal([n_hidden, n_hidden], stddev=0.01))
b3 = tf.Variable(tf.zeros([n_hidden]))
h3 = lrelu(tf.matmul(h2,W3) + b3)
W4 = tf.Variable(tf.truncated_normal([n_hidden, n_out], stddev=0.01))
b4 = tf.Variable(tf.zeros([n_out]))
y = tf.nn.softmax(tf.matmul(h3,W4) + b4)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(t*tf.log(y),reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(t,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
'''
train setting
'''
epochs = 50
batch_size = 200
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
n_batches = (int)(N * train_size) // batch_size
for epoch in range(epochs):
X_, Y_ = shuffle(X_train, Y_train)
for i in range(n_batches):
start = i * batch_size
end = start + batch_size
sess.run(train_step, feed_dict={x:X_[start:end],t:Y_[start:end]})
loss = cross_entropy.eval(session=sess,feed_dict={x:X_,t:Y_})
acc = accuracy.eval(session=sess, feed_dict={x:X_,t:Y_})
print('epoch:', epoch, ' loss:', loss, ' accuracy:', acc)
'''
evaluation of the model
'''
accuracy_rate = accuracy.eval(session=sess, feed_dict={x:X_test,t:Y_test})
print('accuracy: ', accuracy_rate)
| mit |
tanonev/codewebs | src/deprecated/visualize/ConnectedComponents.py | 1 | 3370 | '''
Created on Mar 12, 2013
@author: chrispiech
'''
import os
import numpy
import matplotlib
from pylab import *
import networkx as nx
from scipy.sparse import coo_matrix
from scipy.sparse import lil_matrix
from scipy.io import mmread, mmwrite
import itertools
from FileSystem import FileSystem
FULL_MATRIX_ZIP = 'ast_1_3.gz'
FULL_MATRIX = 'sparse10.mat'
SMALL_MATRIX = 'ast_1_1.txt.sparse10.mtx'
TEST_MATRIX = 'test.txt'
class Runner(object):
def createGraph(self, distanceMatrix):
graph = nx.Graph()
cx = coo_matrix(distanceMatrix)
for i,j,v in zip(cx.row, cx.col, cx.data):
if v > 0:
graph.add_edge(i, j, weight=v)
return graph
def filterBySimilarity(self, graph, maxDistance):
filteredGraph = nx.Graph()
for node in graph.nodes():
filteredGraph.add_node(node)
for edgeTuple in graph.edges():
start = edgeTuple[0]
end = edgeTuple[1]
weight = graph.edge[start][end]['weight']
if weight <= maxDistance and weight > 0:
attrDict = {'weight': weight}
filteredGraph.add_edge(start, end, attrDict)
return filteredGraph
def graphConnectedComponentsVsCutoff(self):
print 'graph connected components'
distanceMatrix = FileSystem.loadDistanceMatrix(FULL_MATRIX)
graph = self.createGraph(distanceMatrix)
for i in range(11, -1, -1):
filteredGraph = self.filterBySimilarity(graph, i)
components = nx.number_connected_components(filteredGraph)
print str(i) + '\t' + str(components)
def getAverageDegree(self, graph):
degrees = graph.degree()
return numpy.mean(degrees.values())
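    # The size of a component is the total number of ASTs it represents, summing the
    # submissions mapped to each of its nodes.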
def getComponentSizes(self, components, submissionIdMap):
sizes = []
for component in components:
size = 0
for node in component.nodes():
numAsts = len(submissionIdMap[node])
size += numAsts
sizes.append(size)
return sorted(sizes, reverse=True)
def getStats(self):
print 'graph stats'
distanceMatrix = FileSystem.loadDistanceMatrix('ast_1_3.sparse10.mat')
submissionIdMap = FileSystem.loadSubmissionIdMap('ast_1_3')
graph = self.createGraph(distanceMatrix)
for i in range(11):
filteredGraph = self.filterBySimilarity(graph, i)
components = nx.connected_component_subgraphs(filteredGraph)
componentSizes = self.getComponentSizes(components, submissionIdMap)
numComponents = len(components)
degree = self.getAverageDegree(filteredGraph)
edges = nx.number_of_edges(filteredGraph)
toPrint = []
toPrint.append(i)
toPrint.append(numComponents)
toPrint.append(degree)
toPrint.append(edges)
toPrint.append(componentSizes[0])
toPrint.append(componentSizes[1])
toPrint.append(componentSizes[2])
string = ''
for elem in toPrint:
string += str(elem) + '\t'
print string
def run(self):
self.getStats()
print'done'
if __name__ == '__main__':
Runner().run() | mit |
evgchz/scikit-learn | setup.py | 12 | 5853 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
###############################################################################
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
if len(set(('develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
)).intersection(sys.argv)) > 0:
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
###############################################################################
class CleanCommand(Clean):
description = "Remove build directories, and compiled file in the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
###############################################################################
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass={'clean': CleanCommand},
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
neilengineer/data-science-from-scratch | code/neural_networks.py | 54 | 6622 | from __future__ import division
from collections import Counter
from functools import partial
from linear_algebra import dot
import math, random
import matplotlib
import matplotlib.pyplot as plt
def step_function(x):
return 1 if x >= 0 else 0
def perceptron_output(weights, bias, x):
"""returns 1 if the perceptron 'fires', 0 if not"""
return step_function(dot(weights, x) + bias)
def sigmoid(t):
return 1 / (1 + math.exp(-t))
def neuron_output(weights, inputs):
return sigmoid(dot(weights, inputs))
def feed_forward(neural_network, input_vector):
"""takes in a neural network (represented as a list of lists of lists of weights)
and returns the output from forward-propagating the input"""
outputs = []
for layer in neural_network:
input_with_bias = input_vector + [1] # add a bias input
output = [neuron_output(neuron, input_with_bias) # compute the output
for neuron in layer] # for this layer
outputs.append(output) # and remember it
# the input to the next layer is the output of this one
input_vector = output
return outputs
def backpropagate(network, input_vector, target):
hidden_outputs, outputs = feed_forward(network, input_vector)
# the output * (1 - output) is from the derivative of sigmoid
output_deltas = [output * (1 - output) * (output - target[i])
for i, output in enumerate(outputs)]
# adjust weights for output layer (network[-1])
for i, output_neuron in enumerate(network[-1]):
for j, hidden_output in enumerate(hidden_outputs + [1]):
output_neuron[j] -= output_deltas[i] * hidden_output
# back-propagate errors to hidden layer
hidden_deltas = [hidden_output * (1 - hidden_output) *
dot(output_deltas, [n[i] for n in network[-1]])
for i, hidden_output in enumerate(hidden_outputs)]
# adjust weights for hidden layer (network[0])
for i, hidden_neuron in enumerate(network[0]):
for j, input in enumerate(input_vector + [1]):
hidden_neuron[j] -= hidden_deltas[i] * input
def patch(x, y, hatch, color):
"""return a matplotlib 'patch' object with the specified
location, crosshatch pattern, and color"""
return matplotlib.patches.Rectangle((x - 0.5, y - 0.5), 1, 1,
hatch=hatch, fill=False, color=color)
def show_weights(neuron_idx):
weights = network[0][neuron_idx]
abs_weights = map(abs, weights)
grid = [abs_weights[row:(row+5)] # turn the weights into a 5x5 grid
for row in range(0,25,5)] # [weights[0:5], ..., weights[20:25]]
ax = plt.gca() # to use hatching, we'll need the axis
ax.imshow(grid, # here same as plt.imshow
cmap=matplotlib.cm.binary, # use white-black color scale
interpolation='none') # plot blocks as blocks
# cross-hatch the negative weights
for i in range(5): # row
for j in range(5): # column
if weights[5*i + j] < 0: # row i, column j = weights[5*i + j]
# add black and white hatches, so visible whether dark or light
ax.add_patch(patch(j, i, '/', "white"))
ax.add_patch(patch(j, i, '\\', "black"))
plt.show()
if __name__ == "__main__":
raw_digits = [
"""11111
1...1
1...1
1...1
11111""",
"""..1..
..1..
..1..
..1..
..1..""",
"""11111
....1
11111
1....
11111""",
"""11111
....1
11111
....1
11111""",
"""1...1
1...1
11111
....1
....1""",
"""11111
1....
11111
....1
11111""",
"""11111
1....
11111
1...1
11111""",
"""11111
....1
....1
....1
....1""",
"""11111
1...1
11111
1...1
11111""",
"""11111
1...1
11111
....1
11111"""]
def make_digit(raw_digit):
return [1 if c == '1' else 0
for row in raw_digit.split("\n")
for c in row.strip()]
inputs = map(make_digit, raw_digits)
targets = [[1 if i == j else 0 for i in range(10)]
for j in range(10)]
random.seed(0) # to get repeatable results
input_size = 25 # each input is a vector of length 25
num_hidden = 5 # we'll have 5 neurons in the hidden layer
output_size = 10 # we need 10 outputs for each input
# each hidden neuron has one weight per input, plus a bias weight
hidden_layer = [[random.random() for __ in range(input_size + 1)]
for __ in range(num_hidden)]
# each output neuron has one weight per hidden neuron, plus a bias weight
output_layer = [[random.random() for __ in range(num_hidden + 1)]
for __ in range(output_size)]
# the network starts out with random weights
network = [hidden_layer, output_layer]
# 10,000 iterations seems enough to converge
for __ in range(10000):
for input_vector, target_vector in zip(inputs, targets):
backpropagate(network, input_vector, target_vector)
def predict(input):
return feed_forward(network, input)[-1]
for i, input in enumerate(inputs):
outputs = predict(input)
print i, [round(p,2) for p in outputs]
print """.@@@.
...@@
..@@.
...@@
.@@@."""
print [round(x, 2) for x in
predict( [0,1,1,1,0, # .@@@.
0,0,0,1,1, # ...@@
0,0,1,1,0, # ..@@.
0,0,0,1,1, # ...@@
0,1,1,1,0]) # .@@@.
]
print
print """.@@@.
@..@@
.@@@.
@..@@
.@@@."""
print [round(x, 2) for x in
predict( [0,1,1,1,0, # .@@@.
1,0,0,1,1, # @..@@
0,1,1,1,0, # .@@@.
1,0,0,1,1, # @..@@
0,1,1,1,0]) # .@@@.
]
print
| unlicense |
profxj/old_xastropy | xastropy/PH136/exercises/solarspec_exerc.py | 7 | 5163 | """Module to perform the Solar Spectrum exercise exercise for PH136.
Currently tuned to Lick spectra from Jan 2013
"""
# Import libraries
import numpy as np
from astropy.io import fits
from matplotlib import pyplot
import pdb
#####################################
# Show a 1D spectrum
# Useful to eyeball the pixel values of a few key lines
# import xastropy.PH136.exercises.solarspec_exerc as ssp
# reload(ssp)
# ssp.plot_spec('b1014.fits')
def plot_spec(fits_fil,prow=None,give_spec=False, noplot=False):
from astropy.io.fits import getdata
# Read
arr,head = getdata(fits_fil,0,header=True)
siz = arr.shape
if prow == None:
prow = int(siz[0]/2.)
# Define the spectrum
spec = arr[prow,:]
npix = len(spec)
#pdb.set_trace()
# Plot
    if not noplot:
pyplot.clf()
pyplot.plot(np.arange(npix), spec)
pyplot.show()
if give_spec:
return spec
else:
return
#pdb.set_trace()
################################################
# Define a Gaussian plus a floor offset
def gauss_off(x, Z, A, x0, sigma):
return Z + A*np.exp(- (x-x0)**2 / (2.*sigma**2) )
################################################
# Fit a wavelength solution to hard-coded values, and plot
def gaussfit_line(xval, yval, xguess, xrng=15.,debug=False):
from scipy.optimize import curve_fit
import xastropy.PH136.exercises.solarspec_exerc as ssp
# Grab the region to fit
gdx = np.where( np.fabs( xval-xguess ) < xrng )
newx = xval[gdx]
newy = yval[gdx]
# Guess
Aguess = np.max(newy)
sguess = 2.
Z = np.median(newy)
pguess = [Z, Aguess, xguess, sguess]
# Fit
popt, pcov = curve_fit(ssp.gauss_off, newx, newy, p0=pguess)
# Debug
if debug:
pyplot.clf()
#data
pyplot.plot(newx, newy, 'o')
#curve
xdum = np.linspace(np.min(newx), np.max(newx), num=300)
Z,A,x0,sigma = popt
ydum = Z + A*np.exp(- (xdum-x0)**2 / (2.*sigma**2) )
# plot
pyplot.plot(xdum, ydum)
pyplot.show()
return popt[2]
################################################
# Fit a wavelength solution using hard-coded guesses, and plot
# import xastropy.PH136.exercises.solarspec_exerc as ssp
# reload(ssp)
# fit = ssp.fit_lines(fits_fil='b1014.fits')
def fit_lines(fits_fil=None, xquery=None,show_plot=False, plot_spec=False):
import xastropy.PH136.exercises.solarspec_exerc as ssp
from astropy.io.fits import getdata
if fits_fil == None:
fits_fil = 'b1014.fits'
spec = ssp.plot_spec(fits_fil, give_spec=True)
npix = len(spec)
xpix = np.arange(npix)
# Generate the arrays
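    # Reference wavelengths (in Angstrom) of the arc lines used for calibration, paired with
    # rough pixel-centre guesses for this frame.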
wav_val = np.array( [5085.82, 4799.92, 4358.33, 3466.55])
guess_pix_val = np.array( [1930.5, 1664.72, 1241.46, 316.8])
# Centroid those lines!
nlin = len(guess_pix_val)
pix_val = np.zeros(nlin)
ii=0
for gpix in guess_pix_val:
pix_val[ii] = ssp.gaussfit_line(xpix,spec,gpix)#,debug=True)
ii += 1
#pdb.set_trace()
# Fit
fit = np.polyfit(pix_val, wav_val, 2)
print 'Fit (dlam, w0): ', fit
# Setup for plot
pv = np.poly1d(fit)
xval = np.linspace(1., 2000, 100)
yval = pv(xval)
# Plot?
if show_plot:
pyplot.clf()
pyplot.plot(pix_val, wav_val, 'o')
pyplot.plot(xval, yval)
pyplot.xlabel('pixel')
pyplot.ylabel('Wave (Ang)')
#pyplot.show()
pyplot.savefig('arclin_fit.pdf')
# Plot the spectrum
if plot_spec and (fits_fil != None):
spec = ssp.plot_spec(fits_fil, give_spec=True, noplot=True)
npix = len(spec)
xval = np.arange(npix)
wave = pv(xval)
pyplot.clf()
pyplot.plot(wave, spec,drawstyle="steps-mid", ls='-')
pyplot.xlim([3000., 5500])
pyplot.xlabel('Wavelength (Ang)')
pyplot.ylabel('Counts')
pyplot.savefig('arclin_spec.pdf')
# Print a value
if xquery != None:
wquery = pv(xquery)
print 'Wavelength for pixel = ', xquery, ' is wave = ', wquery
return fit
################################################
# Extract and show the solar spectrum
# import xastropy.PH136.exercises.solarspec_exerc as ssp
# reload(ssp)
# ssp.sol_spec()
def sol_spec(fits_fil=None, xquery=None,show_plot=False, plot_spec=False, arc_fil=None):
import xastropy.PH136.exercises.solarspec_exerc as ssp
# Get wavelength solution
if arc_fil == None:
arc_fil = 'b1014.fits'
fit = ssp.fit_lines(fits_fil=arc_fil)
pv = np.poly1d(fit)
# Read Solar spectrum
if fits_fil == None:
fits_fil = 'b1029.fits'
spec = ssp.plot_spec(fits_fil, give_spec=True)
npix = len(spec)
xpix = np.arange(npix)
wave = pv(xpix)
# Plot
pyplot.clf()
pyplot.plot(wave, spec,drawstyle="steps-mid", ls='-')
pyplot.xlim([3000., 5500])
pyplot.xlabel('Wavelength (Ang)')
pyplot.ylabel('Counts')
# Ca lines
pyplot.axvline(3933.68, color='r')
pyplot.axvline(3968.47, color='r')
pyplot.show()
# Ca H+K
# 3955.5, 3991.
| bsd-3-clause |
av8ramit/tensorflow | tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py | 7 | 12158 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN's estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.contrib import layers
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.estimator.python import gan_estimator_impl as estimator
from tensorflow.contrib.gan.python.losses.python import tuple_losses as losses
from tensorflow.contrib.learn.python.learn.learn_io import graph_io
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import training
from tensorflow.python.training import training_util
def generator_fn(noise_dict, mode):
del mode
noise = noise_dict['x']
return layers.fully_connected(noise, noise.shape[1].value)
def discriminator_fn(data, unused_conditioning, mode):
del unused_conditioning, mode
return layers.fully_connected(data, 1)
def mock_head(testcase, expected_generator_inputs, expected_real_data,
generator_scope_name):
"""Returns a mock head that validates logits values and variable names."""
discriminator_scope_name = 'Discriminator' # comes from TFGAN defaults
generator_var_names = set([
'%s/fully_connected/weights:0' % generator_scope_name,
'%s/fully_connected/biases:0' % generator_scope_name])
discriminator_var_names = set([
'%s/fully_connected/weights:0' % discriminator_scope_name,
'%s/fully_connected/biases:0' % discriminator_scope_name])
def _create_estimator_spec(features, mode, logits, labels):
gan_model = logits # renaming for clarity
is_predict = mode == model_fn_lib.ModeKeys.PREDICT
testcase.assertIsNone(features)
testcase.assertIsNone(labels)
testcase.assertIsInstance(gan_model, namedtuples.GANModel)
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
expected_var_names = (generator_var_names if is_predict else
generator_var_names | discriminator_var_names)
testcase.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
assertions = []
def _or_none(x):
return None if is_predict else x
testcase.assertEqual(expected_generator_inputs, gan_model.generator_inputs)
# TODO(joelshor): Add check on `generated_data`.
testcase.assertItemsEqual(
generator_var_names,
set([x.name for x in gan_model.generator_variables]))
testcase.assertEqual(generator_scope_name, gan_model.generator_scope.name)
testcase.assertEqual(_or_none(expected_real_data), gan_model.real_data)
# TODO(joelshor): Add check on `discriminator_real_outputs`.
# TODO(joelshor): Add check on `discriminator_gen_outputs`.
if is_predict:
testcase.assertIsNone(gan_model.discriminator_scope)
else:
testcase.assertEqual(discriminator_scope_name,
gan_model.discriminator_scope.name)
with ops.control_dependencies(assertions):
if mode == model_fn_lib.ModeKeys.TRAIN:
return model_fn_lib.EstimatorSpec(
mode=mode, loss=array_ops.zeros([]),
train_op=control_flow_ops.no_op(), training_hooks=[])
elif mode == model_fn_lib.ModeKeys.EVAL:
return model_fn_lib.EstimatorSpec(
mode=mode, predictions=gan_model.generated_data,
loss=array_ops.zeros([]))
elif mode == model_fn_lib.ModeKeys.PREDICT:
return model_fn_lib.EstimatorSpec(
mode=mode, predictions=gan_model.generated_data)
else:
testcase.fail('Invalid mode: {}'.format(mode))
head = test.mock.NonCallableMagicMock(spec=head_lib._Head)
head.create_estimator_spec = test.mock.MagicMock(
wraps=_create_estimator_spec)
return head
class GANModelFnTest(test.TestCase):
"""Tests that _gan_model_fn passes expected logits to mock head."""
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_logits_helper(self, mode):
"""Tests that the expected logits are passed to mock head."""
with ops.Graph().as_default():
training_util.get_or_create_global_step()
generator_inputs = {'x': array_ops.zeros([5, 4])}
real_data = (None if mode == model_fn_lib.ModeKeys.PREDICT else
array_ops.zeros([5, 4]))
generator_scope_name = 'generator'
head = mock_head(self,
expected_generator_inputs=generator_inputs,
expected_real_data=real_data,
generator_scope_name=generator_scope_name)
estimator_spec = estimator._gan_model_fn(
features=generator_inputs,
labels=real_data,
mode=mode,
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_scope_name=generator_scope_name,
head=head)
with monitored_session.MonitoredTrainingSession(
checkpoint_dir=self._model_dir) as sess:
if mode == model_fn_lib.ModeKeys.TRAIN:
sess.run(estimator_spec.train_op)
elif mode == model_fn_lib.ModeKeys.EVAL:
sess.run(estimator_spec.loss)
elif mode == model_fn_lib.ModeKeys.PREDICT:
sess.run(estimator_spec.predictions)
else:
self.fail('Invalid mode: {}'.format(mode))
def test_logits_predict(self):
self._test_logits_helper(model_fn_lib.ModeKeys.PREDICT)
def test_logits_eval(self):
self._test_logits_helper(model_fn_lib.ModeKeys.EVAL)
def test_logits_train(self):
self._test_logits_helper(model_fn_lib.ModeKeys.TRAIN)
# TODO(joelshor): Add pandas test.
class GANEstimatorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, prediction_size,
lr_decay=False):
def make_opt():
gstep = training_util.get_or_create_global_step()
lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
return training.GradientDescentOptimizer(lr)
gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
est = estimator.GANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=gopt,
discriminator_optimizer=dopt,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUTE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([x for x in est.predict(predict_input_fn)])
self.assertAllEqual(prediction_size, predictions.shape)
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
batch_size = 5
data = np.zeros([batch_size, input_dim])
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, input_dim])
def test_numpy_input_fn_lrdecay(self):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
batch_size = 5
data = np.zeros([batch_size, input_dim])
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, input_dim],
lr_decay=True)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dim = 4
batch_size = 6
data = np.zeros([batch_size, input_dim])
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
'y': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(
serialized_examples, feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
prediction_size=[batch_size, input_dim])
if __name__ == '__main__':
test.main()
| apache-2.0 |
SpatialTranscriptomicsResearch/st_analysis | scripts/st_data_plotter.py | 1 | 9620 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script that creates a scatter plot from one or more ST datasets in matrix format
(genes as columns and spots as rows)
The output will be an image file with the same name as the input file/s if no name if given.
It allows to choose transparency for the data points
It allows to pass images so the spots are plotted on top of it (an alignment file
can be passed along to convert spot coordinates to pixel coordinates)
It allows to normalize the counts using different algorithms
It allows to filter out which spots to plot, based on gene counts or on gene names
(following a reg-exp pattern)
@Author Jose Fernandez Navarro <[email protected]>
"""
import argparse
import re
from matplotlib import pyplot as plt
from stanalysis.visualization import scatter_plot
from stanalysis.preprocessing import *
from stanalysis.alignment import parseAlignmentMatrix
import pandas as pd
import numpy as np
import os
import sys
def main(counts_table_files,
image_files,
alignment_files,
cutoff,
data_alpha,
dot_size,
normalization,
filter_genes,
outdir,
use_log_scale):
if len(counts_table_files) == 0 or \
any([not os.path.isfile(f) for f in counts_table_files]):
sys.stderr.write("Error, input file/s not present or invalid format\n")
sys.exit(1)
if image_files is not None and len(image_files) > 0 and \
len(image_files) != len(counts_table_files):
sys.stderr.write("Error, the number of images given as " \
"input is not the same as the number of datasets\n")
sys.exit(1)
if alignment_files is not None and len(alignment_files) > 0 \
and len(alignment_files) != len(image_files):
sys.stderr.write("Error, the number of alignments given as " \
"input is not the same as the number of images\n")
sys.exit(1)
if outdir is None or not os.path.isdir(outdir):
outdir = os.getcwd()
outdir = os.path.abspath(outdir)
print("Output directory {}".format(outdir))
print("Input datasets {}".format(" ".join(counts_table_files)))
# Merge input datasets (Spots are rows and genes are columns)
counts = aggregate_datatasets(counts_table_files)
print("Total number of spots {}".format(len(counts.index)))
print("Total number of genes {}".format(len(counts.columns)))
# Remove noisy spots and genes (Spots are rows and genes are columns)
counts = remove_noise(counts, 1 / 100.0, 1 / 100.0, min_expression=1)
# Normalization
print("Computing per spot normalization...")
counts = normalize_data(counts, normalization)
# Extract the list of the genes that must be shown
genes_to_keep = list()
if filter_genes:
for gene in counts.columns:
for regex in filter_genes:
if re.match(regex, gene):
genes_to_keep.append(gene)
break
else:
genes_to_keep = counts.columns
if len(genes_to_keep) == 0:
sys.stderr.write("Error, no genes found with the reg-exp given\n")
sys.exit(1)
# Create a scatter plot for each dataset
print("Plotting data...")
total_spots = counts.index
vmin = 10e6
vmax = -1
x_points = list()
y_points = list()
colors = list()
for i, name in enumerate(counts_table_files):
spots = filter(lambda x:'{}_'.format(i) in x, total_spots)
# Compute the expressions for each spot
# as the sum of all spots that pass the thresholds (Gene and counts)
x_points.append(list())
y_points.append(list())
colors.append(list())
for spot in spots:
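            # Spot ids have the form '<dataset index>_<x>x<y>'; strip the dataset prefix and split
            # on 'x' to recover the array coordinates.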
tokens = spot.split("x")
assert(len(tokens) == 2)
y = float(tokens[1])
x = float(tokens[0].split("_")[1])
exp = sum(count for count in counts.loc[spot,genes_to_keep] if count > cutoff)
if exp > 0.0:
x_points[i].append(x)
y_points[i].append(y)
if use_log_scale: exp = np.log2(exp)
vmin = min(vmin, exp)
vmax = max(vmax, exp)
colors[i].append(exp)
for i, name in enumerate(counts_table_files):
if len(colors[i]) == 0:
            sys.stdout.write("Warning, the gene/s given are not expressed in {}\n".format(name))
continue
# Retrieve alignment matrix and image if any
image = image_files[i] if image_files is not None \
and len(image_files) >= i else None
alignment = alignment_files[i] if alignment_files is not None \
and len(alignment_files) >= i else None
# alignment_matrix will be identity if alignment file is None
alignment_matrix = parseAlignmentMatrix(alignment)
# Create a scatter plot for the gene data
# If image is given plot it as a background
scatter_plot(x_points=x_points[i],
y_points=y_points[i],
colors=colors[i],
output=os.path.join(outdir, "{}.pdf".format(os.path.splitext(os.path.basename(name))[0])),
alignment=alignment_matrix,
cmap=plt.get_cmap("YlOrBr"),
title=name,
xlabel='X',
ylabel='Y',
image=image,
alpha=data_alpha,
size=dot_size,
show_legend=False,
show_color_bar=True,
vmin=vmin,
vmax=vmax)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--counts-table-files", required=True, nargs='+', type=str,
help="One or more matrices with gene counts per feature/spot (genes as columns)")
parser.add_argument("--alignment-files", default=None, nargs='+', type=str,
help="One or more tab delimited files containing and alignment matrix for the images as\n" \
"\t a11 a12 a13 a21 a22 a23 a31 a32 a33\n" \
"Only useful is the image has extra borders, for instance not cropped to the array corners\n" \
"or if you want the keep the original image size in the plots.")
parser.add_argument("--image-files", default=None, nargs='+', type=str,
help="When provided the data will plotted on top of the image\n" \
"It can be one ore more, ideally one for each input dataset\n " \
"It is desirable that the image is cropped to the array\n" \
"corners otherwise an alignment file is needed")
parser.add_argument("--cutoff",
help="Do not include genes that falls below this reads cut off per spot (default: %(default)s)",
type=float, default=0.0, metavar="[FLOAT]", choices=range(0, 100))
parser.add_argument("--data-alpha", type=float, default=1.0, metavar="[FLOAT]",
help="The transparency level for the data points, 0 min and 1 max (default: %(default)s)")
parser.add_argument("--dot-size", type=int, default=20, metavar="[INT]", choices=range(1, 100),
help="The size of the dots (default: %(default)s)")
parser.add_argument("--normalization", default="RAW", metavar="[STR]",
type=str,
choices=["RAW", "DESeq2", "DESeq2Linear", "DESeq2PseudoCount",
"DESeq2SizeAdjusted", "REL", "TMM", "RLE", "Scran"],
help="Normalize the counts using:\n" \
"RAW = absolute counts\n" \
"DESeq2 = DESeq2::estimateSizeFactors(counts)\n" \
"DESeq2PseudoCount = DESeq2::estimateSizeFactors(counts + 1)\n" \
"DESeq2Linear = DESeq2::estimateSizeFactors(counts, linear=TRUE)\n" \
"DESeq2SizeAdjusted = DESeq2::estimateSizeFactors(counts + lib_size_factors)\n" \
"RLE = EdgeR RLE * lib_size\n" \
"TMM = EdgeR TMM * lib_size\n" \
"Scran = Deconvolution Sum Factors (Marioni et al)\n" \
"REL = Each gene count divided by the total count of its spot\n" \
"(default: %(default)s)")
parser.add_argument("--show-genes", help="Regular expression for gene symbols to be shown\n" \
"If given only the genes matching the reg-exp will be shown.\n" \
"Can be given several times.",
default=None,
type=str,
action='append')
parser.add_argument("--outdir", default=None, help="Path to output dir")
parser.add_argument("--use-log-scale", action="store_true", default=False, help="Use log2(counts + 1) values")
args = parser.parse_args()
main(args.counts_table_files,
args.image_files,
args.alignment_files,
args.cutoff,
args.data_alpha,
args.dot_size,
args.normalization,
args.show_genes,
args.outdir,
args.use_log_scale)
| mit |
ueshin/apache-spark | python/pyspark/pandas/missing/series.py | 16 | 5929 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import pandas as pd
from pyspark.pandas.missing import unsupported_function, unsupported_property, common
def _unsupported_function(method_name, deprecated=False, reason=""):
return unsupported_function(
class_name="pd.Series", method_name=method_name, deprecated=deprecated, reason=reason
)
def _unsupported_property(property_name, deprecated=False, reason=""):
return unsupported_property(
class_name="pd.Series", property_name=property_name, deprecated=deprecated, reason=reason
)
class MissingPandasLikeSeries(object):
# Functions
asfreq = _unsupported_function("asfreq")
autocorr = _unsupported_function("autocorr")
combine = _unsupported_function("combine")
convert_dtypes = _unsupported_function("convert_dtypes")
cov = _unsupported_function("cov")
ewm = _unsupported_function("ewm")
infer_objects = _unsupported_function("infer_objects")
interpolate = _unsupported_function("interpolate")
reorder_levels = _unsupported_function("reorder_levels")
resample = _unsupported_function("resample")
searchsorted = _unsupported_function("searchsorted")
set_axis = _unsupported_function("set_axis")
slice_shift = _unsupported_function("slice_shift")
to_hdf = _unsupported_function("to_hdf")
to_period = _unsupported_function("to_period")
to_sql = _unsupported_function("to_sql")
to_timestamp = _unsupported_function("to_timestamp")
tshift = _unsupported_function("tshift")
tz_convert = _unsupported_function("tz_convert")
tz_localize = _unsupported_function("tz_localize")
view = _unsupported_function("view")
# Deprecated functions
convert_objects = _unsupported_function("convert_objects", deprecated=True)
nonzero = _unsupported_function("nonzero", deprecated=True)
reindex_axis = _unsupported_function("reindex_axis", deprecated=True)
select = _unsupported_function("select", deprecated=True)
get_values = _unsupported_function("get_values", deprecated=True)
# Properties we won't support.
array = common.array(_unsupported_property)
duplicated = common.duplicated(_unsupported_property)
nbytes = _unsupported_property(
"nbytes",
reason="'nbytes' requires to compute whole dataset. You can calculate manually it, "
"with its 'itemsize', by explicitly executing its count. Use Spark's web UI "
"to monitor disk and memory usage of your application in general.",
)
# Functions we won't support.
memory_usage = common.memory_usage(_unsupported_function)
to_pickle = common.to_pickle(_unsupported_function)
to_xarray = common.to_xarray(_unsupported_function)
__iter__ = common.__iter__(_unsupported_function)
ravel = _unsupported_function(
"ravel",
reason="If you want to collect your flattened underlying data as an NumPy array, "
"use 'to_numpy().ravel()' instead.",
)
if LooseVersion(pd.__version__) < LooseVersion("1.0"):
# Deprecated properties
blocks = _unsupported_property("blocks", deprecated=True)
ftypes = _unsupported_property("ftypes", deprecated=True)
ftype = _unsupported_property("ftype", deprecated=True)
is_copy = _unsupported_property("is_copy", deprecated=True)
ix = _unsupported_property("ix", deprecated=True)
asobject = _unsupported_property("asobject", deprecated=True)
strides = _unsupported_property("strides", deprecated=True)
imag = _unsupported_property("imag", deprecated=True)
itemsize = _unsupported_property("itemsize", deprecated=True)
data = _unsupported_property("data", deprecated=True)
base = _unsupported_property("base", deprecated=True)
flags = _unsupported_property("flags", deprecated=True)
# Deprecated functions
as_blocks = _unsupported_function("as_blocks", deprecated=True)
as_matrix = _unsupported_function("as_matrix", deprecated=True)
clip_lower = _unsupported_function("clip_lower", deprecated=True)
clip_upper = _unsupported_function("clip_upper", deprecated=True)
compress = _unsupported_function("compress", deprecated=True)
get_ftype_counts = _unsupported_function("get_ftype_counts", deprecated=True)
get_value = _unsupported_function("get_value", deprecated=True)
set_value = _unsupported_function("set_value", deprecated=True)
valid = _unsupported_function("valid", deprecated=True)
to_dense = _unsupported_function("to_dense", deprecated=True)
to_sparse = _unsupported_function("to_sparse", deprecated=True)
to_msgpack = _unsupported_function("to_msgpack", deprecated=True)
compound = _unsupported_function("compound", deprecated=True)
put = _unsupported_function("put", deprecated=True)
ptp = _unsupported_function("ptp", deprecated=True)
# Functions we won't support.
real = _unsupported_property(
"real",
reason="If you want to collect your data as an NumPy array, use 'to_numpy()' instead.",
)
| apache-2.0 |
AndreasMadsen/grace | Code/sAe.py | 1 | 7042 | import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scipy.optimize
def save_as_figure(arr, filepath="output/frame.png"):
array = (arr - np.min(arr))/(np.max(arr)-np.min(arr))
# plt.imshow(array, interpolation='nearest', cmap=plt.cm.gray)
plt.imshow(array, cmap=plt.cm.gray)
plt.savefig(filepath)
print "Saving to ", filepath
# This function is used to enforce the dimensionality of matrices, since NumPy is a
# bit aggressive about allowing operators over non-matching dimensions.
def ASSERT_SIZE(matrix, shape):
if matrix.shape != shape:
raise AssertionError("Wrong shape: %s expexted: %s" %
(matrix.shape, shape))
# This wraps the parameters for the sparse autoencoder.
class SparseAutoEncoderOptions:
# These network parameters are specified by Andrew Ng specifically for
# the MNIST data set here:
# [[http://ufldl.stanford.edu/wiki/index.php/Exercise:Vectorization]]
def __init__(self,
visible_size,
hidden_size,
sparsity = 0.1,
learning_rate = 3e-3,
beta = 3,
output_dir = "output",
max_iterations = 500):
self.visible_size = visible_size
self.hidden_size = hidden_size
self.sparsity_param = sparsity
self.learning_rate = learning_rate
self.beta = beta
self.output_dir = output_dir
self.max_iterations = max_iterations
class SparseAutoEncoderSolution:
def __init__(self, W1, W2, b1, b2):
self.W1 = W1
self.W2 = W2
self.b1 = b1
self.b2 = b2
# The SparseAutoEncoder object wraps all the data needed in order to train a
# sparse autoencoder. Its constructor takes a SparseAutoEncoderOptions and a
# v x m matrix where v is the size of the visible layer of the network.
class SparseAutoEncoder:
def __init__(self, options, data):
self.options = options
self.data = data
self.frame_number = 0
# Convert the matrices to a flat vector. This is needed by 'fmin_l_bfgs_b'.
def flatten(self, W1, W2, b1, b2):
return np.array(np.hstack([W1.ravel('F'), W2.ravel('F'),
b1.ravel('F'), b2.ravel('F')]), order='F')
# Convert the flat vector back to the W1, W2, b1, and b2 matrices.
def unflatten(self, theta):
hidden_size = self.options.hidden_size
visible_size = self.options.visible_size
hv = hidden_size * visible_size
W1 = theta[0:hv].reshape([hidden_size, visible_size], order='F')
W2 = theta[hv:2*hv].reshape([visible_size, hidden_size], order='F')
b1 = theta[2*hv:2*hv+hidden_size].reshape([hidden_size, 1], order='F')
b2 = theta[2*hv+hidden_size:].reshape([visible_size, 1], order='F')
return (W1, W2, b1, b2)
# Create the random values for the parameters to begin learning.
def initialize_parameters(self):
hidden_size = self.options.hidden_size
visible_size = self.options.visible_size
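# The weights are drawn uniformly from [-r, r] with r = sqrt(6 / (fan_in + fan_out + 1)),
# a symmetry-breaking range commonly used for sigmoid networks (Glorot-style initialization).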
r = np.sqrt(6) / np.sqrt(hidden_size + visible_size + 1)
W1 = np.random.random([hidden_size, visible_size]) * 2 * r - r;
W2 = np.random.random([visible_size, hidden_size]) * 2 * r - r;
b1 = np.zeros([hidden_size, 1])
b2 = np.zeros([visible_size, 1])
return self.flatten(W1, W2, b1, b2)
# The logistic sigmoid: 1/(1 + e^{-x})
def sigmoid(self, x):
return 1.0 / (1.0 + np.exp(-x))
# ==Forward pass==
# Note: even though the dimensionality doesn't match because b1 is a vector,
# numpy will apply b1 to every column.
def feed_forward(self, x, W1, W2, b1, b2):
visible_size = self.options.visible_size
hidden_size = self.options.hidden_size
ASSERT_SIZE(W1, (hidden_size, visible_size))
m = x.shape[1]
z2 = np.dot(W1, x) + b1
a2 = self.sigmoid(z2)
ASSERT_SIZE(a2, (hidden_size, m))
z3 = np.dot(W2, a2) + b2 # W2 * a2 + b2
a3 = self.sigmoid(z3)
ASSERT_SIZE(a3, (visible_size, m))
return a2, a3
# Compute the cost function J and the gradient for an input. Note that this
# takes a flattened W1, W2, b1, b2 because of fmin_l_bfgs_b.
def sparse_autoencoder(self, theta):
visible_size = self.options.visible_size
hidden_size = self.options.hidden_size
lamb = self.options.learning_rate
rho = self.options.sparsity_param
beta = self.options.beta
x = self.data
m = x.shape[1]
W1, W2, b1, b2 = self.unflatten(theta)
ASSERT_SIZE(W1, (hidden_size, visible_size))
ASSERT_SIZE(W2, (visible_size, hidden_size))
ASSERT_SIZE(b1, (hidden_size, 1))
ASSERT_SIZE(b2, (visible_size, 1))
if self.frame_number % 100 == 0:
save_as_figure(W1.T,
"%s/w1frame%03d.png" % (self.options.output_dir,
self.frame_number))
save_as_figure(W2.T,
"%s/w2frame%03d.png" % (self.options.output_dir,
self.frame_number))
self.frame_number += 1
a2, a3 = self.feed_forward(x, W1, W2, b1, b2)
# Compute average activation for an edge over all data
rho_hat = np.mean(a2, 1)[:, np.newaxis]
ASSERT_SIZE(rho_hat, (hidden_size, 1))
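# KL divergence between a Bernoulli(rho) target distribution and the observed Bernoulli(rho_hat):
# this penalty grows as a hidden unit's average activation drifts away from the desired sparsity.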
kl = rho*np.log(rho/rho_hat) + (1-rho)*np.log((1-rho)/(1-rho_hat))
cost = 0.5/m * np.sum((a3 - x)**2) + \
(lamb/2.)*(np.sum(W1**2) + np.sum(W2**2)) + \
beta*np.sum(kl)
# We set y equal to the input since we're learning
# an identity function
y = x
delta3 = -(y - a3) * a3*(1-a3)
ASSERT_SIZE(delta3, (visible_size, m))
sparsity = -rho/rho_hat + (1-rho)/(1-rho_hat)
ASSERT_SIZE(sparsity, (hidden_size, 1))
delta2 = (np.dot(W2.T, delta3) + beta * sparsity) * a2 * (1-a2)
ASSERT_SIZE(delta2, (hidden_size, m))
W2_grad = 1./m * np.dot(delta3, a2.T) + lamb * W2
ASSERT_SIZE(W2_grad, (visible_size, hidden_size))
# [:, newaxis] makes this into a matrix
b2_grad = 1./m * np.sum(delta3, 1)[:, np.newaxis]
ASSERT_SIZE(b2_grad, (visible_size, 1))
# sum the rows of delta3 and then mult by 1/m
W1_grad = 1./m * np.dot(delta2, x.T) + lamb * W1
ASSERT_SIZE(W1_grad, (hidden_size, visible_size))
b1_grad = 1./m * np.sum(delta2, 1)[:, np.newaxis]
ASSERT_SIZE(b1_grad, (hidden_size, 1))
grad = self.flatten(W1_grad, W2_grad, b1_grad, b2_grad)
return (cost, grad)
# Actually run gradient descent. Call mySparseAutoEncoder.learn() to learn
# the parameters of W1, W2, b1, and b2 for this network and this data.
def learn(self):
def f(theta):
return self.sparse_autoencoder(theta)
theta = self.initialize_parameters()
same_theta = theta.copy()
x, f, d = scipy.optimize.fmin_l_bfgs_b(f, theta,
maxfun= self.options.max_iterations,
iprint=1, m=20)
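# fmin_l_bfgs_b returns the optimized parameter vector x, the final cost f, and an info dict d.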
W1, W2, b1, b2 = self.unflatten(x)
save_as_figure(W1.T, "%s/network.png" % self.options.output_dir)
return SparseAutoEncoderSolution(W1, W2, b1, b2)
| mit |
giacomov/astromodels | astromodels/functions/template_model.py | 2 | 19276 | import collections
import astropy.units as u
import numpy as np
import os
import pandas as pd
from pandas.api.types import infer_dtype
import re
import scipy.interpolate
import warnings
from pandas import HDFStore
from astromodels.core.parameter import Parameter
from astromodels.functions.function import Function1D, FunctionMeta
from astromodels.utils.configuration import get_user_data_path
# A very small number which will be substituted to zero during the construction
# of the templates
_TINY_ = 1e-50
__all__ = ["IncompleteGrid", "ValuesNotInGrid", "MissingDataFile", "TemplateModelFactory", "TemplateModel"]
class IncompleteGrid(RuntimeError):
pass
class ValuesNotInGrid(ValueError):
pass
class MissingDataFile(RuntimeError):
pass
# This dictionary will keep track of the new classes already created in the current session
_classes_cache = {}
class TemplateModelFactory(object):
def __init__(self, name, description, energies, names_of_parameters,
interpolation_degree=1, spline_smoothing_factor=0):
# Store model name
# Enforce that it does not contain spaces nor strange characters
name = str(name)
if re.match("[a-zA-Z_][a-zA-Z0-9_]*", name) is None:
raise RuntimeError("The provided name '%s' is not a valid name. You cannot use spaces, "
"or special characters")
self._name = name
self._description = str(description)
# Store energy grid
if not isinstance(energies, u.Quantity):
warnings.warn("Energy unit is not a Quantity instance, so units has not been provided. Using keV.")
energies = energies * u.keV
self._energies = np.array(energies.to(u.keV).value)
# Enforce that they are ordered
self._energies.sort()
# We create a dictionary which will contain the grid for each parameter
self._parameters_grids = collections.OrderedDict()
for parameter_name in names_of_parameters:
self._parameters_grids[parameter_name] = None
self._data_frame = None
self._multi_index = None
self._interpolators = None
self._interpolation_degree = interpolation_degree
self._spline_smoothing_factor = int(spline_smoothing_factor)
def define_parameter_grid(self, parameter_name, grid):
assert parameter_name in self._parameters_grids, "Parameter %s is not part of this model" % parameter_name
grid_ = np.array(grid)
assert grid_.shape[0] > 1, "A grid for a parameter must contain at least two elements"
# Assert that elements are unique
assert np.all(np.unique(grid_) == grid_), "Non-unique elements in grid for parameter %s" % parameter_name
self._parameters_grids[parameter_name] = grid_
def add_interpolation_data(self, differential_fluxes, **parameters_values_input):
# Verify that the grid has been defined for all parameters
for grid in self._parameters_grids.values():
if grid is None:
raise IncompleteGrid("You need to define a grid for all parameters, by using the "
"define_parameter_grid method.")
if self._data_frame is None:
# This is the first data set, create the data frame
# Create the multi-index
self._multi_index = pd.MultiIndex.from_product(self._parameters_grids.values(),
names=self._parameters_grids.keys())
# Pre-fill the data matrix with nans, so we will know if some elements have not been filled
self._data_frame = pd.DataFrame(index=self._multi_index, columns=self._energies)
# Make sure we have all parameters and order the values in the same way as the dictionary
parameters_values = np.zeros(len(self._parameters_grids)) * np.nan
for key in parameters_values_input:
assert key in self._parameters_grids, "Parameter %s is not known" % key
idx = self._parameters_grids.keys().index(key)
parameters_values[idx] = parameters_values_input[key]
# If the user did not specify one of the parameters, then the parameters_values array will contain nan
assert np.all(np.isfinite(parameters_values)), "You didn't specify all parameters' values."
# Make sure we are dealing with pure numpy arrays (list and astropy.Quantity instances will be transformed)
# First we transform the input into a u.Quantity (if it's not already)
if not isinstance(differential_fluxes, u.Quantity):
differential_fluxes = np.array(differential_fluxes) * 1 / (u.keV * u.s * u.cm ** 2) # type: u.Quantity
# Then we transform it in the right units and we cast it back to a pure np.array
differential_fluxes = np.array(differential_fluxes.to(1 / (u.keV * u.s * u.cm ** 2)).value)
# Now let's check for valid inputs
assert self._energies.shape[0] == differential_fluxes.shape[0], "Differential fluxes and energies must have " \
"the same number of elements"
# Check that the provided value does not contains nan, inf nor zero (as the interpolation happens in the
# log space)
assert np.all(np.isfinite(differential_fluxes)), "You have invalid values in the differential flux (nan or inf)"
assert np.all(differential_fluxes >= 0), "You have negative values in the differential flux (which is of " \
"course impossible)"
if not np.all(differential_fluxes > 0):
warnings.warn("You have zeros in the differential flux. Since the interpolation happens in the log space, "
"this cannot be accepted. We will substitute zeros with %g" % _TINY_)
idx = (differential_fluxes == 0) # type: np.ndarray
differential_fluxes[idx] = _TINY_
# Now set the corresponding values in the data frame
# Now set the values in the data frame
try:
self._data_frame.loc[tuple(parameters_values)] = pd.to_numeric(differential_fluxes)
except KeyError:
raise ValuesNotInGrid("The provided parameter values (%s) are not in the defined grid" % parameters_values)
@staticmethod
def _clean_cols_for_hdf(data):
types = data.apply(lambda x: infer_dtype(x.values))
for col in types.index:
data[col] = pd.to_numeric(data[col])
return data
def save_data(self, overwrite=False):
# First make sure that the whole data matrix has been filled
assert not self._data_frame.isnull().values.any(), "You have NaNs in the data matrix. Usually this means " \
"that you didn't fill it up completely, or that some of " \
"your data contains nans. Cannot save the file."
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % self._name))
# Check that it does not exists
if os.path.exists(filename_sanitized):
if overwrite:
try:
os.remove(filename_sanitized)
except:
raise IOError("The file %s already exists and cannot be removed (maybe you do not have "
"permissions to do so?). " % filename_sanitized)
else:
raise IOError("The file %s already exists! You cannot call two different "
"template models with the same name" % filename_sanitized)
# Open the HDF5 file and write objects
with HDFStore(filename_sanitized) as store:
# The _clean_cols_for_hdf is needed because for some reasons the format of some columns
# is not accepted by .to_hdf otherwise
self._clean_cols_for_hdf(self._data_frame).to_hdf(store, 'data_frame')
store.get_storer('data_frame').attrs.metadata = {'description': self._description,
'name': self._name,
'interpolation_degree': int(self._interpolation_degree),
'spline_smoothing_factor': self._spline_smoothing_factor
}
for i, parameter_name in enumerate(self._parameters_grids.keys()):
store['p_%i_%s' % (i, parameter_name)] = pd.Series(self._parameters_grids[parameter_name])
store['energies'] = pd.Series(self._energies)
# This adds a method to a class at runtime
def add_method(self, method, name=None):
if name is None:
name = method.func_name
setattr(self.__class__, name, method)
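# A minimal usage sketch of TemplateModelFactory (hypothetical grid values and fluxes;
# the energy grid, parameter name and 'some_model' below are placeholders):
#
#   energies = np.logspace(0, 3, 50) * u.keV
#   factory = TemplateModelFactory('my_template', 'example model', energies, ['index'])
#   factory.define_parameter_grid('index', [1.0, 2.0, 3.0])
#   for index in [1.0, 2.0, 3.0]:
#       fluxes = some_model(energies, index)  # shape (50,), in 1 / (keV s cm2)
#       factory.add_interpolation_data(fluxes, index=index)
#   factory.save_data(overwrite=True)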
class RectBivariateSplineWrapper(object):
"""
Wrapper around RectBivariateSpline, which supplies a __call__ method that accepts the same
syntax as the other interpolation methods
"""
def __init__(self, *args, **kwargs):
# We can use interp2, which features spline interpolation instead of linear interpolation
self._interpolator = scipy.interpolate.RectBivariateSpline(*args, **kwargs)
def __call__(self, x):
res = self._interpolator(*x)
return res[0][0]
class TemplateModel(Function1D):
r"""
description :
A template model
latex : $n.a.$
parameters :
K :
desc : Normalization (freeze this to 1 if the template provides the normalization by itself)
initial value : 1.0
scale :
desc : Scale for the independent variable. The templates are handled as if they contain the fluxes
at E = scale * x. This is useful, for example, when the template describes energies in the rest
frame, in which case the scale describes the transformation between rest-frame energy and
observer-frame energy. Fix this to 1 to neutralize its effect.
initial value : 1.0
min : 1e-5
"""
__metaclass__ = FunctionMeta
def _custom_init_(self, model_name, other_name=None,log_interp = True):
"""
Custom initialization for this model
:param model_name: the name of the model, corresponding to the root of the .h5 file in the data directory
:param other_name: (optional) the name to be used as name of the model when used in astromodels. If None
(default), use the same name as model_name
:return: none
"""
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % model_name))
if not os.path.exists(filename_sanitized):
raise MissingDataFile("The data file %s does not exists. Did you use the "
"TemplateFactory?" % (filename_sanitized))
# Open the template definition and read from it
self._data_file = filename_sanitized
with HDFStore(filename_sanitized) as store:
self._data_frame = store['data_frame']
self._parameters_grids = collections.OrderedDict()
processed_parameters = 0
for key in store.keys():
match = re.search('p_([0-9]+)_(.+)', key)
if match is None:
continue
else:
tokens = match.groups()
this_parameter_number = int(tokens[0])
this_parameter_name = str(tokens[1])
assert this_parameter_number == processed_parameters, "Parameters out of order!"
self._parameters_grids[this_parameter_name] = store[key]
processed_parameters += 1
self._energies = store['energies']
# Now get the metadata
metadata = store.get_storer('data_frame').attrs.metadata
description = metadata['description']
name = metadata['name']
self._interpolation_degree = metadata['interpolation_degree']
self._spline_smoothing_factor = metadata['spline_smoothing_factor']
# Make the dictionary of parameters
function_definition = collections.OrderedDict()
function_definition['description'] = description
function_definition['latex'] = 'n.a.'
# Now build the parameters according to the content of the parameter grid
parameters = collections.OrderedDict()
parameters['K'] = Parameter('K', 1.0)
parameters['scale'] = Parameter('scale', 1.0)
for parameter_name in self._parameters_grids.keys():
grid = self._parameters_grids[parameter_name]
parameters[parameter_name] = Parameter(parameter_name, grid.median(),
min_value=grid.min(),
max_value=grid.max())
if other_name is None:
super(TemplateModel, self).__init__(name, function_definition, parameters)
else:
super(TemplateModel, self).__init__(other_name, function_definition, parameters)
# Finally prepare the interpolators
self._prepare_interpolators(log_interp)
def _prepare_interpolators(self, log_interp):
# Figure out the shape of the data matrices
data_shape = map(lambda x: x.shape[0], self._parameters_grids.values())
self._interpolators = []
for energy in self._energies:
# Make interpolator for this energy
# NOTE: we interpolate on the logarithm
# unless specified
if log_interp:
this_data = np.array(np.log10(self._data_frame[energy].values).reshape(*data_shape), dtype=float)
self._is_log10 = True
else:
# work in linear space
this_data = np.array(self._data_frame[energy].values.reshape(*data_shape), dtype=float)
self._is_log10 = False
if len(self._parameters_grids.values()) == 2:
x, y = self._parameters_grids.values()
# Make sure that the requested polynomial degree is less than the number of data sets in
# both directions
msg = "You cannot use an interpolation degree of %s if you don't provide at least %s points " \
"in the %s direction. Increase the number of templates or decrease the interpolation " \
"degree."
if len(x) <= self._interpolation_degree:
raise RuntimeError(msg % (self._interpolation_degree, self._interpolation_degree+1, 'x'))
if len(y) <= self._interpolation_degree:
raise RuntimeError(msg % (self._interpolation_degree, self._interpolation_degree + 1, 'y'))
this_interpolator = RectBivariateSplineWrapper(x, y, this_data,
kx=self._interpolation_degree,
ky=self._interpolation_degree,
s=self._spline_smoothing_factor)
else:
# In more than 2d we can only use linear interpolation
this_interpolator = scipy.interpolate.RegularGridInterpolator(self._parameters_grids.values(),
this_data)
self._interpolators.append(this_interpolator)
def _set_units(self, x_unit, y_unit):
self.K.unit = y_unit
self.scale.unit = 1 / x_unit
# This function will be substituted during construction by another version with
# all the parameters of this template
def evaluate(self, x, K, scale, *args):
return K * self._interpolate(x, scale, args)
def _interpolate(self, energies, scale, parameters_values):
if isinstance(energies, u.Quantity):
# Templates are always saved with energy in keV. We need to transform it to
# a dimensionless quantity (actually we take the .value property) because otherwise
# the logarithm below will fail.
energies = np.array(energies.to('keV').value, ndmin=1, copy=False, dtype=float)
# Same for the scale
scale = scale.to(1 / u.keV).value
if self._is_log10:
log_energies = np.log10(energies)
else:
log_energies = energies
e_tilde = self._energies * scale
# Gather all interpolations for these parameters' values at all defined energies
# (these are the logarithm of the values)
# note that if these are not logged, then the name is superfluous
log_interpolations = np.array(map(lambda i:self._interpolators[i](np.atleast_1d(parameters_values)),
range(self._energies.shape[0])))
# Now interpolate the interpolations to get the flux at the requested energies
# NOTE: the variable "interpolations" contains already the log10 of the values,
if self._is_log10:
interpolator = scipy.interpolate.InterpolatedUnivariateSpline(np.log10(e_tilde),
log_interpolations,
k=self._interpolation_degree,
ext=0)
values = np.power(10, interpolator(log_energies))
else:
interpolator = scipy.interpolate.InterpolatedUnivariateSpline(e_tilde,
log_interpolations,
k=self._interpolation_degree,
ext=0)
values = interpolator(log_energies)
# The division by scale results from the differential:
# E = e * scale
# de = dE / scale
# dN / dE = dN / de * de / dE = dN / de * (1 / scale)
# NOTE: the units are added back through the multiplication by K in the evaluate method
return values / scale
@property
def data_file(self):
return self._data_file
def to_dict(self, minimal=False):
data = super(Function1D, self).to_dict(minimal)
# if not minimal:
#
# data['extra_setup'] = {'data_file': self._data_file}
return data
| bsd-3-clause |
seckcoder/lang-learn | python/sklearn/examples/plot_pls.py | 2 | 4630 | """
=========================
PLS Partial Least Squares
=========================
Simple usage of various PLS flavors:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.pls import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
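# X and Y share the same latent structure (l1 twice, l2 twice) plus independent noise,
# so a 2-component PLS should recover strongly correlated score pairs across the datasets.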
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
pl.figure(figsize=(12, 8))
pl.subplot(221)
pl.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
pl.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
pl.xlabel("x scores")
pl.ylabel("y scores")
pl.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
pl.xticks(())
pl.yticks(())
pl.legend(loc="best")
pl.subplot(224)
pl.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
pl.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
pl.xlabel("x scores")
pl.ylabel("y scores")
pl.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
pl.xticks(())
pl.yticks(())
pl.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
pl.subplot(222)
pl.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
pl.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
pl.xlabel("X comp. 1")
pl.ylabel("X comp. 2")
pl.title('X comp. 1 vs X comp. 2 (test corr = %.2f)' % \
np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
pl.legend(loc="best")
pl.xticks(())
pl.yticks(())
pl.subplot(223)
pl.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
pl.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
pl.xlabel("Y comp. 1")
pl.ylabel("Y comp. 2")
pl.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)' % \
np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
pl.legend(loc="best")
pl.xticks(())
pl.yticks(())
pl.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| unlicense |
Karel-van-de-Plassche/QLKNN-develop | qlknn/plots/hyperpar_scan.py | 1 | 5472 | import re
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('pdf')
import matplotlib.pyplot as plt
from matplotlib import gridspec
from peewee import AsIs, JOIN, prefetch, SQL
from IPython import embed
from bokeh.layouts import row, column
from bokeh.plotting import figure, show, output_file
from bokeh.transform import linear_cmap
from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label, Rect, HoverTool, Div
from qlknn.NNDB.model import Network, PureNetworkParams, PostprocessSlice, NetworkMetadata, TrainMetadata, Postprocess, db, Hyperparameters
from qlknn.plots.statistical_spread import get_base_stats
from qlknn.misc.to_precision import to_precision
# First, get some statistics
target_names = ['efeTEM_GB']
hyperpars = ['cost_stable_positive_scale', 'cost_l2_scale']
#hyperpars = ['cost_stable_positive_scale', 'cost_stable_positive_offset']
goodness_pars = ['rms', 'no_pop_frac', 'no_thresh_frac', 'pop_abs_mis_median', 'thresh_rel_mis_median', 'wobble_qlkunstab']
try:
report = get_base_stats(target_names, hyperpars, goodness_pars)
except Network.DoesNotExist:
report = pd.DataFrame(columns=goodness_pars, index=['mean', 'stddev', 'stderr'])
query = (Network.select(Network.id.alias('network_id'),
PostprocessSlice,
Postprocess.rms,
Hyperparameters
)
.join(PostprocessSlice, JOIN.LEFT_OUTER)
.switch(Network)
.join(Postprocess, JOIN.LEFT_OUTER)
.switch(Network)
.where(Network.target_names == target_names)
.switch(Network)
.join(PureNetworkParams)
.join(Hyperparameters)
.where(Hyperparameters.cost_stable_positive_offset.cast('numeric') == -5)
.where(Hyperparameters.cost_stable_positive_function == 'block')
)
if query.count() > 0:
results = list(query.dicts())
df = pd.DataFrame(results)
#df['network'] = df['network'].apply(lambda el: 'pure_' + str(el))
#df['l2_norm'] = df['l2_norm'].apply(np.nanmean)
df.drop(['id', 'network'], inplace=True, axis='columns')
df.set_index('network_id', inplace=True)
stats = df
stats = stats.applymap(np.array)
stats = stats.applymap(lambda x: x[0] if isinstance(x, np.ndarray) and len(x) == 1 else x)
stats.dropna(axis='columns', how='all', inplace=True)
stats.dropna(axis='rows', how='all', inplace=True)
stats = stats.loc[:, hyperpars + goodness_pars]
stats.reset_index(inplace=True)
#stats.set_index(hyperpars, inplace=True)
#stats.sort_index(ascending=False, inplace=True)
#stats = stats.groupby(level=list(range(len(stats.index.levels)))).mean() #Average equal hyperpars
#stats.reset_index(inplace=True)
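# Average the goodness metrics over networks trained with identical hyperparameters and
# keep the matching standard deviation, so each cell can show mean +- spread in the plots.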
aggdict = {'network_id': lambda x: tuple(x)}
aggdict.update({name: 'mean' for name in goodness_pars})
stats_mean = stats.groupby(hyperpars).agg(aggdict)
aggdict.update({name: 'std' for name in goodness_pars})
stats_std = stats.groupby(hyperpars).agg(aggdict)
stats = stats_mean.merge(stats_std, left_index=True, right_index=True, suffixes=('', '_std'))
stats.reset_index(inplace=True)
for name in hyperpars:
stats[name] = stats[name].apply(str)
for name in goodness_pars:
fmt = lambda x: '' if np.isnan(x) else to_precision(x, 4)
fmt_mean = stats[name].apply(fmt)
stats[name + '_formatted'] = fmt_mean
fmt = lambda x: '' if np.isnan(x) else to_precision(x, 2)
fmt_std = stats[name + '_std'].apply(fmt)
prepend = lambda x: '+- ' + x if x != '' else x
stats[name + '_std_formatted'] = fmt_std.apply(prepend)
x = np.unique(stats[hyperpars[1]].values)
x = sorted(x, key=lambda x: float(x))
y = np.unique(stats[hyperpars[0]].values)
y = sorted(y, key=lambda x: float(x))
source = ColumnDataSource(stats)
plotmode = 'bokehz'
hover = HoverTool(tooltips=[
("network_id", "@network_id"),
(hyperpars[0], '@' + hyperpars[0]),
(hyperpars[1], '@' + hyperpars[1])
])
plots = []
for statname in goodness_pars:
fmt = lambda x: '' if np.isnan(x) else to_precision(x, 2)
title = '{:s} (ref={:s}±{:s})'.format(statname,
fmt(report[statname]['mean']),
fmt(report[statname]['stddev'] + report[statname]['stderr']))
p = figure(title=title,
tools="tap", toolbar_location=None,
x_range=x, y_range=y)
p.add_tools(hover)
color = linear_cmap(statname, 'Viridis256', min(stats[statname]), max(stats[statname]))
p.rect(x=hyperpars[1], y=hyperpars[0],
width=1, height=1,
source=source,
fill_color=color, line_color=None,
nonselection_fill_alpha=0.4,
nonselection_fill_color=color,
)
non_selected = Rect(fill_alpha=0.8)
label_kwargs = dict(
x=hyperpars[1], y=hyperpars[0],
level='glyph',
source=source,
text_align='center',
text_color='red'
)
labels = LabelSet(
text=statname + '_formatted',
text_baseline='bottom',
**label_kwargs
)
labels_std = LabelSet(
text=statname + '_std_formatted',
text_baseline='top',
**label_kwargs
)
p.add_layout(labels)
p.add_layout(labels_std)
p.xaxis.axis_label = hyperpars[1]
p.yaxis.axis_label = hyperpars[0]
plots.append(p)
from bokeh.layouts import layout, widgetbox
title = Div(text=','.join(target_names))
l = layout([
[title],
[plots]
])
show(l)
| mit |
JacekPierzchlewski/RxCS | examples/signals/randMult_ex0.py | 1 | 3711 | """
This script is an example of how to use the Random Multitone Signal
Generator module. |br|
In this example 1 random multitone signal is generated. |br|
Time of the signal is 1 ms, the signal representation sampling frequency is
1 MHz. The highest possible frequency of a tone in the signal is 10 kHz,
the signal spectrum resolution is 1 kHz. |br|
The signal contains 1 random tone.
Noise is added to the signal; the SNR of the signal is 5 [dB]. |br|
After the generation, the spectrum of the signal is analyzed with an FFT
and plotted. The signal is also plotted in the time domain.
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <[email protected]>
*Version*:
1.0 | 21-MAY-2014 : * Version 1.0 released. |br|
1.1 | 15-JUL-2015 : * Adjusted to new name of random multitone gen. |br|
2.0 | 21-JUL-2015 : * Version 2.0 released (adjusted to v2.0 of the generator) |br|
2.0r1 | 04-AUG-2015 : * File name changed |br|
*License*:
BSD 2-Clause
"""
from __future__ import division
import rxcs
import numpy as np
import matplotlib.pyplot as plt
def _randMult_ex0():
# Put the generator on board
gen = rxcs.sig.randMult()
# Settings for the generator
gen.tS = 1e-3 # Time of the signal is 1 ms
gen.fR = 1e6 # The signal representation sampling frequency is 1 MHz
gen.fMax = 10e3 # The highest possible frequency in the signal is 10 kHz
gen.fRes = 1e3 # The signal spectrum resolution is 1 kHz
gen.nTones = 1 # The number of random tones
gen.iSNR = 5 # The noise added to the signal
# Run the generator and get the output
gen.run()
vSig = gen.mSig[0, :] # Get the generated signal
vTSig = gen.vTSig # Get the time vector of the signal
fFFTR = gen.fFFTR # Signal FFT frequency resolution
# -----------------------------------------------------------------
# Analyze the signal and plot it
vFFT = np.fft.fft(vSig) # Analyze the spectrum of the signal
iS = vFFT.size # Get the size of the spectrum
# Compute the amplitudes of tones
vFFTa = 2*np.abs(vFFT[np.arange(iS/2).astype(int)])/iS
# Create a vector with frequencies of the signal spectrum
vF = fFFTR * np.arange(iS/2)
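# Single-sided amplitude spectrum: the 2/iS factor above rescales FFT magnitudes to tone
# amplitudes (strictly, the DC bin would need 1/iS), and vF is the matching frequency axis.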
# -----------------------------------------------------------------
# Plot half of the spectrum
hFig1 = plt.figure(1)
hSubPlot1 = hFig1.add_subplot(111)
hSubPlot1.grid(True)
hSubPlot1.set_title('Spectrum of a random multitone signal')
hSubPlot1.set_xlabel('Frequency [Hz]')
(markerline, stemlines, baseline) = hSubPlot1.stem(vF, vFFTa,
linefmt='b-',
markerfmt='bo',
basefmt='r-')
hSubPlot1.set_xlim(-1*1e3, 51*1e3)
hSubPlot1.set_ylim(-0.1, 3.1)
plt.setp(stemlines, color='b', linewidth=2.0)
plt.setp(markerline, color='b', markersize=10.0)
# -----------------------------------------------------------------
# Plot signal in the time domain
hFig2 = plt.figure(2)
hSubPlot1 = hFig2.add_subplot(111)
hSubPlot1.grid(True)
hSubPlot1.set_title('Random multitone signal in the time domain')
hSubPlot1.set_xlabel('Time [s]')
hSubPlot1.plot(vTSig, vSig, 'b-')
# -----------------------------------------------------------------
plt.show(block=True)
# =====================================================================
# Trigger when start as a script
# =====================================================================
if __name__ == '__main__':
_randMult_ex0()
| bsd-2-clause |
hollerith/trading-with-python | spreadApp/makeDist.py | 77 | 1720 | from distutils.core import setup
import py2exe
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
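# 24 is the Win32 resource type RT_MANIFEST; py2exe embeds the XML manifest above under this id.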
import matplotlib
opts = {
'py2exe': {
"compressed": 1,
"bundle_files" : 3,
"includes" : ["sip",
"matplotlib.backends",
"matplotlib.backends.backend_qt4agg",
"pylab", "numpy",
"matplotlib.backends.backend_tkagg"],
'excludes': ['_gtkagg', '_tkagg', '_agg2',
'_cairo', '_cocoaagg',
'_fltkagg', '_gtk', '_gtkcairo', ],
'dll_excludes': ['libgdk-win32-2.0-0.dll',
'libgobject-2.0-0.dll']
}
}
setup(name="triton",
version = "0.1",
scripts=["spreadScanner.pyw"],
windows=[{"script": "spreadScanner.pyw"}],
options=opts,
data_files=matplotlib.get_py2exe_datafiles(),
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog="spreadDetective"))],
zipfile = None) | bsd-3-clause |
mugizico/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user who did not
compile scikit-learn properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
embray/numpy | numpy/lib/twodim_base.py | 1 | 25933 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
__all__ = ['diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri',
'triu', 'tril', 'vander', 'histogram2d', 'mask_indices',
'tril_indices', 'tril_indices_from', 'triu_indices',
'triu_indices_from',
]
from numpy.core.numeric import (
asanyarray, subtract, arange, zeros, greater_equal, multiply, ones,
asarray, where,
)
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
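# Stepping through the flattened array with stride M+1 visits consecutive elements of the
# k-th diagonal; slicing to the first M-k rows stops the fill before it wraps to column 0.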
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return v.diagonal(k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal(subtract.outer(arange(N), arange(M)), -k)
return m.astype(dtype)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
out = multiply(tri(m.shape[-2], m.shape[-1], k=k, dtype=m.dtype), m)
return out
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
out = multiply((1 - tri(m.shape[-2], m.shape[-1], k - 1, dtype=m.dtype)), m)
return out
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, order='decreasing'):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `order` argument, either
"decreasing" (the default) or "increasing". Specifically, when
`order` is "decreasing", the `i`-th output column is the input vector
raised element-wise to the power of ``N - i - 1``. Such a matrix with
a geometric progression in each row is named for Alexandre-Theophile
Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
order : str, optional
Order of the powers of the columns. Must be either 'decreasing'
(the default) or 'increasing'.
Returns
-------
out : ndarray
Vandermonde matrix. If `order` is "decreasing", the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `order` is
"increasing", the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, order='increasing')
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
if order not in ['decreasing', 'increasing']:
raise ValueError("Invalid order %r; order must be either "
"'decreasing' or 'increasing'." % (order,))
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
if order == "decreasing":
powers = arange(N - 1, -1, -1)
else:
powers = arange(N)
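# Broadcasting the (len(x), 1) column of values against the length-N vector of exponents
# builds the whole Vandermonde matrix with a single element-wise power.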
V = x.reshape(-1, 1) ** powers
return V
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
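# When a single array of edges is given, it is shared by both dimensions; the actual
# binning is delegated to histogramdd on the stacked samples.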
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0):
"""
Return the indices for the lower-triangle of an (n, n) array.
Parameters
----------
n : int
The row dimension of the square arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return mask_indices(n, tril, k)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
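    Examples
    --------
    For example, using the same 4x4 array as in the `tril_indices` example:
    >>> a = np.arange(16).reshape(4, 4)
    >>> a[np.tril_indices_from(a)]
    array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])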
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return tril_indices(arr.shape[0], k)
def triu_indices(n, k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return mask_indices(n, triu, k)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of a (N, N) array.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
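    Examples
    --------
    For example, using the same 4x4 array as in the `triu_indices` example:
    >>> a = np.arange(16).reshape(4, 4)
    >>> a[np.triu_indices_from(a)]
    array([ 0,  1,  2,  3,  5,  6,  7, 10, 11, 15])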
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return triu_indices(arr.shape[0], k)
| bsd-3-clause |
RashmiKumari/g_mmpbsa | tools/MmPbSaStat_correlation.py | 1 | 17761 | #!/usr/bin/env python
#
# This file is part of g_mmpbsa.
#
# Authors: Rashmi Kumari and Andrew Lynn
# Contribution: Rajendra Kumar
#
# Copyright (C) 2013-2015 Rashmi Kumari and Andrew Lynn
#
# g_mmpbsa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# g_mmpbsa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with g_mmpbsa. If not, see <http://www.gnu.org/licenses/>.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
#
from __future__ import absolute_import, division, print_function
from builtins import range
from builtins import object
import re, sys
import numpy as np
import argparse
import os
import math
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
def main():
args = ParseOptions()
#File => Frame wise component energy
try:
frame_wise = open(args.outfr, 'w')
except:
raise IOError ('Could not open file {0} for writing. \n' .format(args.outfr))
frame_wise.write('#Time E_VdW_mm(Protein)\tE_Elec_mm(Protein)\tE_Pol(Protein)\tE_Apol(Protein)\tE_VdW_mm(Ligand)\tE_Elec_mm(Ligand)\tE_Pol(Ligand)\tE_Apol(Ligand)\tE_VdW_mm(Complex)\tE_Elec_mm(Complex)\tE_Pol(Complex)\tE_Apol(Complex)\tDelta_E_mm\tDelta_E_Pol\tDelta_E_Apol\tDelta_E_binding\n')
#Complex Energy
c = []
if args.multiple:
MmFile, PolFile, APolFile, K = ReadMetafile(args.metafile)
for i in range(len(MmFile)):
cTmp = Complex(MmFile[i],PolFile[i],APolFile[i],K[i])
cTmp.CalcEnergy(args,frame_wise,i)
c.append(cTmp)
else:
cTmp = Complex(args.molmech,args.polar,args.apolar)
cTmp.CalcEnergy(args,frame_wise,0)
c.append(cTmp)
#Summary in output files => "--outsum" and "--outmeta" file options
Summary_Output_File(c,args)
FitCoef_all = PlotCorr(c,args,args.corrplot)
PlotEnrgy(c,FitCoef_all, args, args.enplot)
def PlotEnrgy(c,FitCoef_all, args, fname):
CompEn, CompEnErr, ExpEn, CI =[], [], [], []
for i in range(len(c)):
CompEn.append(c[i].FinalAvgEnergy)
ExpEn.append(c[i].freeEn)
CompEnErr.append(c[i].StdErr)
CI.append(c[i].CI)
fig = plt.figure()
plt.subplots_adjust(left=0.15, right=0.9, top=0.9, bottom=0.15)
ax = fig.add_subplot(111)
CI = np.array(CI).T
#To plot data
ax.errorbar(ExpEn, CompEn, yerr=CI, fmt='o', ecolor='k',color='k',zorder=20000)
    #To plot straight line having median correlation coefficient
fit = np.polyfit(ExpEn, CompEn, 1)
fitCompEn = np.polyval(fit, ExpEn)
ax.plot(ExpEn,fitCompEn,color='k',lw=3, zorder=20000)
    #To plot straight line having minimum correlation coefficient
#fitCompEn = np.polyval(FitCoef[1], ExpEn)
#ax.plot(ExpEn,fitCompEn,color='g',lw=2)
    #To plot straight line having maximum correlation coefficient
#fitCompEn = np.polyval(FitCoef[2], ExpEn)
#ax.plot(ExpEn,fitCompEn,color='r',lw=2)
for i in range(len(FitCoef_all[0])):
fitCompEn = np.polyval( [FitCoef_all[0][i], FitCoef_all[1][i]], ExpEn)
ax.plot(ExpEn,fitCompEn,color='#BDBDBD', lw=0.5,zorder=1)
ax.set_xlabel('Experimental Free Energy (kJ/mol)',fontsize=24, fontname='Times new Roman')
ax.set_ylabel('Computational Binding Energy (kJ/mol)',fontsize=24, fontname='Times new Roman')
xtics=ax.get_xticks()
plt.xticks(xtics,fontsize=24, fontname='Times new Roman')
ytics=ax.get_yticks()
plt.yticks(ytics,fontsize=24, fontname='Times new Roman')
plt.savefig(fname,dpi=300, orientation='landscape')
def PlotCorr(c,args,fname):
CompEn, ExpEn =[], []
for i in range(len(c)):
CompEn.append(c[i].FinalAvgEnergy)
ExpEn.append(c[i].freeEn)
AvgEn = np.sort(c[i].AvgEnBS,kind='mergesort')
n = len(AvgEn)
div = int(n/21)
AvgEn = AvgEn[:n:div]
c[i].AvgEnBS = AvgEn
main_r = np.corrcoef([CompEn,ExpEn])[0][1]
r, FitCoef = [], []
Id_0_FitCoef, Id_1_FitCoef = [], []
f_corrdist = open(args.corrdist,'w')
    #Bootstrap analysis for correlation coefficient
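    # Each step resamples the complexes with replacement and, for every picked
    # complex, draws one of its pre-computed bootstrap average energies; the
    # correlation coefficient and linear fit of that resample are then recorded.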
nbstep = args.nbstep
for i in range(nbstep):
temp_x, temp_y = [], []
energy_idx = np.random.randint(0,22,size=len(c))
complex_idx = np.random.randint(0,len(c),size=len(c))
for j in range(len(complex_idx)):
temp_y.append(c[complex_idx[j]].AvgEnBS[energy_idx[j]])
temp_x.append(c[complex_idx[j]].freeEn)
rtmp = np.corrcoef([temp_x,temp_y])[0][1]
temp_x = np.array(temp_x)
temp_y = np.array(temp_y)
r.append(rtmp)
fit = np.polyfit(temp_x, temp_y, 1)
FitCoef.append(fit)
f_corrdist.write('{0}\n' .format(rtmp))
    #Separating slope and intercept
Id_0_FitCoef = np.transpose(FitCoef)[0]
Id_1_FitCoef = np.transpose(FitCoef)[1]
    #Calculating mode of correlation coefficient
density, r_hist = np.histogram(r,25,normed=True)
mode = (r_hist[np.argmax(density)+1] + r_hist[np.argmax(density)])/2
#Calculating Confidence Interval
r = np.sort(r)
CI_min_idx = int(0.005*nbstep)
CI_max_idx = int(0.995*nbstep)
CI_min = mode - r[CI_min_idx]
CI_max = r[CI_max_idx] - mode
print("%5.3f %5.3f %5.3f %5.3f" % (main_r, mode, CI_min, CI_max))
    #Plotting Correlation Coefficient Distribution
fig = plt.figure()
plt.subplots_adjust(left=0.15, right=0.9, top=0.9, bottom=0.15)
ax = fig.add_subplot(111)
n, bins, patches = ax.hist(r, 40, normed=1, facecolor='#B2B2B2', alpha=0.75, lw=0.1)
plt.title('Mode = {0:.3f}\nConf. Int. = -{1:.3f}/+{2:.3f}' .format(mode, CI_min,CI_max), fontsize=18, fontname='Times new Roman')
bincenters = 0.5*(bins[1:]+bins[:-1])
#y = mlab.normpdf( bincenters, mode, np.std(r))
#l = ax.plot(bincenters, y, 'k--', lw=1)
ax.set_xlabel('Correlation Coefficient',fontsize=24, fontname='Times new Roman')
ax.set_ylabel('Density', fontsize=24, fontname='Times new Roman')
xtics=ax.get_xticks()
plt.xticks(xtics,fontsize=24, fontname='Times new Roman')
ytics=ax.get_yticks()
plt.yticks(ytics,fontsize=24, fontname='Times new Roman')
plt.savefig(fname,dpi=300, orientation='landscape')
return [Id_0_FitCoef, Id_1_FitCoef]
class Complex(object):
def __init__(self,MmFile,PolFile,APolFile,K):
self.TotalEn = []
self.Vdw, self.Elec, self.Pol, self.Sas, self.Sav, self.Wca =[], [], [], [], [], []
self.MmFile = MmFile
self.PolFile = PolFile
self.APolFile = APolFile
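        # Experimental binding free energy (kJ/mol) from the inhibition constant
        # Ki given in nM: deltaG = RT*ln(Ki*1e-9), with RT taken here as ~2.5 kJ/mol.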
self.freeEn = math.log(K*(10**-9)) * 2.5
self.AvgEnBS = []
self.CI = []
self.FinalAvgEnergy = 0
self.StdErr = 0
def CalcEnergy(self,args,frame_wise,idx):
mmEn = ReadData(self.MmFile,n=7)
polEn = ReadData(self.PolFile,n=4)
apolEn = ReadData(self.APolFile,n=10)
CheckEnData(mmEn,polEn,apolEn)
time, MM, Vdw, Elec, Pol, Apol, Sas, Sav, Wca = [], [], [], [], [], [], [], [], []
for i in range(len(mmEn[0])):
#Vacuum MM
Energy = mmEn[5][i] + mmEn[6][i] - (mmEn[1][i] + mmEn[2][i] + mmEn[3][i] + mmEn[4][i])
MM.append(Energy)
Energy = mmEn[5][i] - (mmEn[1][i] + mmEn[3][i])
Vdw.append(Energy)
Energy = mmEn[6][i] - (mmEn[2][i] + mmEn[4][i])
Elec.append(Energy)
# Polar
Energy = polEn[3][i] - (polEn[1][i] + polEn[2][i])
Pol.append(Energy)
#Non-polar
Energy = apolEn[3][i] + apolEn[6][i] + apolEn[9][i] - (apolEn[1][i] + apolEn[2][i] + apolEn[4][i] + apolEn[5][i] + apolEn[7][i] + apolEn[8][i])
Apol.append(Energy)
Energy = apolEn[3][i] - (apolEn[1][i] + apolEn[2][i])
Sas.append(Energy)
Energy = apolEn[6][i] - (apolEn[4][i] + apolEn[5][i])
Sav.append(Energy)
Energy = apolEn[9][i] - (apolEn[7][i] + apolEn[8][i])
Wca.append(Energy)
#Final Energy
time.append(mmEn[0][i])
Energy = MM[i] + Pol[i] + Apol[i]
self.TotalEn.append(Energy)
# Writing frame wise component energy to file
frame_wise.write('\n#Complex %d\n' % ( (idx+1)))
for i in range(len(time)):
frame_wise.write('%15.3lf %15.3lf %15.3lf %15.3lf %15.3lf' % (time[i], mmEn[1][i], mmEn[2][i], polEn[1][i], (apolEn[1][i] + apolEn[4][i] + apolEn[7][i])))
frame_wise.write('%15.3lf %15.3lf %15.3lf %15.3lf' % (mmEn[3][i], mmEn[4][i], polEn[2][i], (apolEn[2][i] + apolEn[5][i] + apolEn[8][i])))
frame_wise.write('%15.3lf %15.3lf %15.3lf %15.3lf' % (mmEn[5][i], mmEn[6][i], polEn[3][i], (apolEn[3][i] + apolEn[6][i] + apolEn[9][i])))
frame_wise.write('%15.3lf %15.3lf %15.3lf %15.3lf\n' % (MM[i], Pol[i], Apol[i], self.TotalEn[i]))
#Bootstrap analysis energy components
if(args.bootstrap):
bsteps = args.nbstep
avg_energy, error = BootStrap(Vdw,bsteps)
self.Vdw.append(avg_energy)
self.Vdw.append(error)
avg_energy, error = BootStrap(Elec,bsteps)
self.Elec.append(avg_energy)
self.Elec.append(error)
avg_energy, error = BootStrap(Pol,bsteps)
self.Pol.append(avg_energy)
self.Pol.append(error)
avg_energy, error = BootStrap(Sas,bsteps)
self.Sas.append(avg_energy)
self.Sas.append(error)
avg_energy, error = BootStrap(Sav,bsteps)
self.Sav.append(avg_energy)
self.Sav.append(error)
avg_energy, error = BootStrap(Wca,bsteps)
self.Wca.append(avg_energy)
self.Wca.append(error)
#Bootstrap => Final Average Energy
self.AvgEnBS, AvgEn, EnErr, CI = ComplexBootStrap(self.TotalEn,bsteps)
self.FinalAvgEnergy = AvgEn
self.StdErr = EnErr
self.CI = CI
#If not bootstrap then average and standard deviation
else:
self.Vdw.append(np.mean(Vdw))
self.Vdw.append(np.std(Vdw))
self.Elec.append(np.mean(Elec))
self.Elec.append(np.std(Elec))
self.Pol.append(np.mean(Pol))
self.Pol.append(np.std(Pol))
self.Sas.append(np.mean(Sas))
self.Sas.append(np.std(Sas))
self.Sav.append(np.mean(Sav))
self.Sav.append(np.std(Sav))
self.Wca.append(np.mean(Wca))
self.Wca.append(np.std(Wca))
self.FinalAvgEnergy = np.mean(self.TotalEn)
self.StdErr = np.std(self.TotalEn)
def Summary_Output_File(AllComplex,args):
try:
fs = open(args.outsum,'w')
except:
raise IOError ('Could not open file {0} for writing. \n' .format(args.outsum))
if args.multiple:
try:
fm = open(args.outmeta,'w')
except:
raise IOError ('Could not open file {0} for writing. \n' .format(args.outmeta))
fm.write('# Complex_Number\t\tTotal_Binding_Energy\t\tError\n')
for n in range(len(AllComplex)):
fs.write('\n\n#Complex Number: %4d\n' % (n+1))
fs.write('===============\n SUMMARY \n===============\n\n')
fs.write('\n van der Waal energy = %15.3lf +/- %7.3lf kJ/mol\n' % (AllComplex[n].Vdw[0], AllComplex[n].Vdw[1]))
        fs.write('\n Electrostatic energy = %15.3lf +/- %7.3lf kJ/mol\n' % (AllComplex[n].Elec[0],AllComplex[n].Elec[1]))
fs.write('\n Polar solvation energy = %15.3lf +/- %7.3lf kJ/mol\n' % (AllComplex[n].Pol[0], AllComplex[n].Pol[1]))
fs.write('\n SASA energy = %15.3lf +/- %7.3lf kJ/mol\n' % (AllComplex[n].Sas[0], AllComplex[n].Sas[1]))
fs.write('\n SAV energy = %15.3lf +/- %7.3lf kJ/mol\n' % (AllComplex[n].Sav[0], AllComplex[n].Sav[1]))
fs.write('\n WCA energy = %15.3lf +/- %7.3lf kJ/mol\n' % (AllComplex[n].Wca[0], AllComplex[n].Wca[1]))
fs.write('\n Binding energy = %15.3lf +/- %7.3lf kJ/mol\n' % (AllComplex[n].FinalAvgEnergy, AllComplex[n].StdErr))
fs.write('\n===============\n END \n===============\n\n')
if args.multiple:
fm.write('%5d %15.3lf %7.3lf\n' % (n+1 , AllComplex[n].FinalAvgEnergy, AllComplex[n].StdErr))
def CheckEnData(mmEn,polEn,apolEn):
frame = len(mmEn[0])
for i in range(len(mmEn)):
if(len(mmEn[i]) != frame):
raise ValueError("In MM file, size of columns are not equal.")
for i in range(len(polEn)):
if(len(polEn[i]) != frame):
raise ValueError("In Polar file, size of columns are not equal.")
for i in range(len(apolEn)):
if(len(apolEn[i]) != frame):
raise ValueError("In APolar file, size of columns are not equal.")
def ParseOptions():
parser = argparse.ArgumentParser()
parser.add_argument("-mt", "--multiple", help='If given, calculate for multiple complexes. Need Metafile containing path of energy files',action="store_true")
parser.add_argument("-mf", "--metafile", help='Metafile containing path to energy files of each complex in a row obtained from g_mmpbsa in following order: \
[MM file] [Polar file] [ Non-polar file] [Ki] \
Ki Should be in NanoMolar (nM)',action="store", default='metafile.dat', metavar='metafile.dat')
parser.add_argument("-m", "--molmech", help='Vacuum Molecular Mechanics energy file obtained from g_mmpbsa',action="store", default='energy_MM.xvg', metavar='energy_MM.xvg')
parser.add_argument("-p", "--polar", help='Polar solvation energy file obtained from g_mmpbsa',action="store",default='polar.xvg', metavar='polar.xvg')
parser.add_argument("-a", "--apolar", help='Non-Polar solvation energy file obtained from g_mmpbsa',action="store",default='apolar.xvg',metavar='apolar.xvg')
parser.add_argument("-bs", "--bootstrap", help='If given, Enable Boot Strap analysis',action="store_true")
parser.add_argument("-nbs", "--nbstep", help='Number of boot strap steps for average energy calculation',action="store", type=int,default=1000)
parser.add_argument("-of", "--outfr", help='Energy File: Energy components frame wise',action="store",default='full_energy.dat', metavar='full_energy.dat')
parser.add_argument("-os", "--outsum", help='Final Energy File: Full Summary of energy components',action="store",default='summary_energy.dat', metavar='summary_energy.dat')
parser.add_argument("-om", "--outmeta", help='Final Energy File for Multiple Complexes: Complex wise final binding nergy',action="store",default='meta_energy.dat', metavar='meta_energy.dat')
parser.add_argument("-ep", "--enplot", help='Experimental Energy vs Calculated Energy Correlation Plot',action="store",default='enplot.png', metavar='enplot.png')
parser.add_argument("-cd", "--corrdist", help='Correlation distribution data from bootstrapping',action="store",default='corrdist.dat', metavar='corrdist.dat')
parser.add_argument("-cp", "--corrplot", help='Plot of correlation distribution',action="store",default='corrdist.png', metavar='corrdist.png')
if len(sys.argv) < 2:
print('ERROR: No input files. Need help!!!')
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if args.multiple:
if not os.path.exists(args.metafile):
print('\nERROR: {0} not found....\n' .format(args.metafile))
parser.print_help()
sys.exit(1)
else:
if not os.path.exists(args.molmech):
print('\nERROR: {0} not found....\n' .format(args.molmech))
parser.print_help()
sys.exit(1)
if not os.path.exists(args.polar):
print('\nERROR: {0} not found....\n' .format(args.polar))
parser.print_help()
sys.exit(1)
if not os.path.exists(args.apolar):
print('\nERROR: {0} not found....\n' .format(args.apolar))
parser.print_help()
sys.exit(1)
return args
def ReadData(FileName,n=2):
infile = open(FileName,'r')
x, data = [],[]
for line in infile:
line = line.rstrip('\n')
if not line.strip():
continue
if(re.match('#|@',line)==None):
temp = line.split()
data.append(np.array(temp))
for j in range(0,n):
x_temp =[]
for i in range(len(data)):
try:
value = float(data[i][j])
except:
raise FloatingPointError('\nCould not convert {0} to floating point number.. Something is wrong in {1}..\n' .format(data[i][j], FileName))
x_temp.append(value)
x.append(x_temp)
return x
def ComplexBootStrap(x,step=1000):
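    # Resample the per-frame total energies with replacement `step` times; return
    # the sorted bootstrap means, their mean and standard deviation, and a 99%
    # confidence interval taken from the 0.5th and 99.5th percentiles.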
avg =[]
x = np.array(x)
n = len(x)
idx = np.random.randint(0,n,(step,n))
sample_x = x[idx]
avg = np.sort(np.mean(sample_x,1))
CI_min = avg[int(0.005*step)]
CI_max = avg[int(0.995*step)]
    #print('Energy = %13.3f; Confidence Interval = (-%-5.3f / +%-5.3f)\n' % (np.mean(avg), (np.mean(avg)-CI_min), (CI_max-np.mean(avg))))
return avg, np.mean(avg), np.std(avg), [(np.mean(avg)-CI_min), (CI_max-np.mean(avg))]
def BootStrap (x,step=1000):
if(np.mean(x)) == 0:
return 0.000, 0.000
else:
avg =[]
x = np.array(x)
n = len(x)
idx = np.random.randint(0,n,(step,n))
sample_x = x[idx]
avg = np.sort(np.mean(sample_x,1))
return np.mean(avg),np.std(avg)
def find_nearest_index(array,value):
idx = (np.abs(array-value)).argmin()
return idx
def ReadMetafile(metafile):
MmFile,PolFile, APolFile, Ki = [], [], [], []
FileList = open(metafile,'r')
for line in FileList:
line = line.rstrip('\n')
if not line.strip():
continue
temp = line.split()
MmFile.append(temp[0])
PolFile.append(temp[1])
APolFile.append(temp[2])
Ki.append(float(temp[3]))
if not os.path.exists(temp[0]):
raise IOError('Could not open file {0} for reading. \n' .format(temp[0]))
if not os.path.exists(temp[1]):
raise IOError('Could not open file {0} for reading. \n' .format(temp[1]))
if not os.path.exists(temp[2]):
raise IOError('Could not open file {0} for reading. \n' .format(temp[2]))
return MmFile, PolFile, APolFile, Ki
if __name__=="__main__":
main()
| gpl-3.0 |
fraka6/trading-with-python | cookbook/getDataFromYahooFinance.py | 77 | 1391 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 18:37:23 2011
@author: jev
"""
from urllib import urlretrieve
from urllib2 import urlopen
from pandas import Index, DataFrame
from datetime import datetime
import matplotlib.pyplot as plt
sDate = (2005,1,1)
eDate = (2011,10,1)
symbol = 'SPY'
fName = symbol+'.csv'
try: # try to load saved csv file, otherwise get from the net
fid = open(fName)
lines = fid.readlines()
fid.close()
print 'Loaded from ' , fName
except Exception as e:
print e
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
print 'Downloading from ', urlStr
urlretrieve(urlStr,symbol+'.csv')
lines = urlopen(urlStr).readlines()
dates = []
data = [[] for i in range(6)]
#high
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
df.plot(secondary_y=['volume'])
| bsd-3-clause |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/tests/frame/test_sorting.py | 2 | 17804 | # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_sort_values(self):
# API for 9816
# sort_index
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
frame.sort(columns='A')
with tm.assert_produces_warning(FutureWarning):
frame.sort()
unordered = frame.ix[[3, 2, 4, 1]]
expected = unordered.sort_index()
result = unordered.sort_index(axis=0)
assert_frame_equal(result, expected)
unordered = frame.ix[:, [2, 1, 3, 0]]
expected = unordered.sort_index(axis=1)
result = unordered.sort_index(axis=1)
assert_frame_equal(result, expected)
assert_frame_equal(result, expected)
# sortlevel
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
result = df.sort_index(level='A', sort_remaining=False)
expected = df.sortlevel('A', sort_remaining=False)
assert_frame_equal(result, expected)
df = df.T
result = df.sort_index(level='A', axis=1, sort_remaining=False)
expected = df.sortlevel('A', axis=1, sort_remaining=False)
assert_frame_equal(result, expected)
# MI sort, but no by
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
result = df.sort_index(sort_remaining=False)
expected = df.sort_index()
assert_frame_equal(result, expected)
def test_sort_index(self):
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.ix[[3, 2, 4, 1]]
sorted_df = unordered.sort_index(axis=0)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = unordered.sort_index(ascending=False)
expected = frame[::-1]
assert_frame_equal(sorted_df, expected)
# axis=1
unordered = frame.ix[:, ['D', 'B', 'C', 'A']]
sorted_df = unordered.sort_index(axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = unordered.sort_index(axis=1, ascending=False)
expected = frame.ix[:, ::-1]
assert_frame_equal(sorted_df, expected)
# by column
sorted_df = frame.sort_values(by='A')
indexer = frame['A'].argsort().values
expected = frame.ix[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.ix[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# check for now
sorted_df = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected[::-1])
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
sorted_df = frame.sort_values(by=['A', 'B'])
assert_frame_equal(sorted_df, expected[::-1])
self.assertRaises(ValueError, lambda: frame.sort_values(
by=['A', 'B'], axis=2, inplace=True))
msg = 'When sorting by column, axis must be 0'
with assertRaisesRegexp(ValueError, msg):
frame.sort_values(by='A', axis=1)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with assertRaisesRegexp(ValueError, msg):
frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_index_categorical_index(self):
df = (DataFrame({'A': np.arange(6, dtype='int64'),
'B': Series(list('aabbca'))
.astype('category', categories=list('cab'))})
.set_index('B'))
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[3, 2, 5, 1, 0, 4]]
assert_frame_equal(result, expected)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
sorted_df = df.sort_values(['A', 'B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], ascending=[
1, 0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
sorted_df = df.sort_values(['A', 'B'], ascending=[
0, 1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(
kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index=[nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index=[nan, 6, 5, 4, 3, 2, 1])
assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_values(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# test stable mergesort
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 1],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 0],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_index_multicolumn(self):
import random
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'])
result = frame.sort_values(by=['A', 'B'])
indexer = np.lexsort((frame['B'], frame['A']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'], ascending=False)
result = frame.sort_values(by=['A', 'B'], ascending=False)
indexer = np.lexsort((frame['B'].rank(ascending=False),
frame['A'].rank(ascending=False)))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['B', 'A'])
result = frame.sort_values(by=['B', 'A'])
indexer = np.lexsort((frame['A'], frame['B']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.ix[[3, 2, 4, 1]]
a_id = id(unordered['A'])
df = unordered.copy()
df.sort_index(inplace=True)
expected = frame
assert_frame_equal(df, expected)
self.assertNotEqual(a_id, id(df['A']))
df = unordered.copy()
df.sort_index(ascending=False, inplace=True)
expected = frame[::-1]
assert_frame_equal(df, expected)
# axis=1
unordered = frame.ix[:, ['D', 'B', 'C', 'A']]
df = unordered.copy()
df.sort_index(axis=1, inplace=True)
expected = frame
assert_frame_equal(df, expected)
df = unordered.copy()
df.sort_index(axis=1, ascending=False, inplace=True)
expected = frame.ix[:, ::-1]
assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['A', 'B'], ascending=[1, 0])
result = df.sort_values(by=['A', 'B'], ascending=[1, 0])
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
assert_frame_equal(result, expected)
# test with multiindex, too
idf = df.set_index(['A', 'B'])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
assert_frame_equal(result, expected)
# also, Series!
result = idf['C'].sort_index(ascending=[1, 0])
assert_series_equal(result, expected['C'])
def test_sort_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort_values(by='A', inplace=True)
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by='A', ascending=False, inplace=True)
expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_index_duplicates(self):
# with 9816, these are all translated to .sort_values
df = DataFrame([lrange(5, 9), lrange(4)],
columns=['a', 'a', 'b', 'b'])
with assertRaisesRegexp(ValueError, 'duplicate'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with assertRaisesRegexp(ValueError, 'duplicate'):
df.sort_values(by='a')
with assertRaisesRegexp(ValueError, 'duplicate'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['a'])
with assertRaisesRegexp(ValueError, 'duplicate'):
df.sort_values(by=['a'])
with assertRaisesRegexp(ValueError, 'duplicate'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
# multi-column 'by' is separate codepath
df.sort_index(by=['a', 'b'])
with assertRaisesRegexp(ValueError, 'duplicate'):
# multi-column 'by' is separate codepath
df.sort_values(by=['a', 'b'])
# with multi-index
# GH4370
df = DataFrame(np.random.randn(4, 2),
columns=MultiIndex.from_tuples([('a', 0), ('a', 1)]))
with assertRaisesRegexp(ValueError, 'levels'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with assertRaisesRegexp(ValueError, 'levels'):
df.sort_values(by='a')
# convert tuples to a list of tuples
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=[('a', 1)])
expected = df.sort_values(by=[('a', 1)])
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=('a', 1))
result = df.sort_values(by=('a', 1))
assert_frame_equal(result, expected)
def test_sortlevel(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
res = df.sortlevel('A', sort_remaining=False)
assert_frame_equal(df, res)
res = df.sortlevel(['A', 'B'], sort_remaining=False)
assert_frame_equal(df, res)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'],
columns=['A'],
index=date_range('20130101', periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11', '2004-01-21', '2004-01-26',
'2005-09-20', '2010-10-04', '2009-05-12',
'2008-11-12', '2010-09-28', '2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort_values(by='A')
df2 = df.sort_values(by=['A'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['B'])
assert_frame_equal(df1, df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with assertRaisesRegexp(ValueError, "This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
| gpl-2.0 |
lazywei/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grows.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features / 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
blueray45/GSRT | TFDisplayTools.py | 1 | 16853 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 07 13:26:28 2015
Display package
Helper routines for plotting transfer functions (TF) and related quantities
(phase spectra, sensitivity spectrograms and velocity profiles).
@author: irnakat
"""
import numpy as np
import pylab as plt
from matplotlib.ticker import ScalarFormatter
# from matplotlib.ticker import StrMethodFormatter
from matplotlib.ticker import FormatStrFormatter
from matplotlib.ticker import FuncFormatter
import matplotlib.cm as cm
def majortickformat(x, pos):
'The two args are the value and tick position'
return '%.1f'%(x)
def minortickformat(x, pos):
'The two args are the value and tick position'
strtemp = str(x)
if x>=1:
if int(strtemp[0])>5:
return ''
else:
return '%d' % (x)
else:
return ''
def cbtickformat(x,pos):
'Tick formatter for colorbar'
return '%.1f'%x
def colorcycle(ncolorinput):
ncolor = 256
clist = cm.rainbow(np.arange(ncolor))
cclist = []
if ncolorinput>=2:
for i in range(ncolorinput-1):
n = ncolor/(ncolorinput-1)
cclist.append(clist[n*i])
cclist.append(clist[-1])
return cclist
def velocityprofileplot(inp,a2,cclist):
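    # Draw a stair-step velocity-depth profile for each input model on axis a2:
    # vs as a solid line and, when available, vp as a dotted line (both in km/s),
    # using one colour per model from cclist and depth increasing downwards.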
xmax = []
xmin = []
for i in range(len(inp)):
hl = inp[i].hl
depthtemp = np.concatenate(([0.],np.cumsum(hl)))
try:
vptemp = inp[i].vp
novp = False
except:
print('vp is not found for argument : %d!'%i)
novp = True
vstemp = inp[i].vs
depth = [0.]
vs = [vstemp[0]/1000.]
if not novp:
vp = [vptemp[0]/1000.]
for j in range(1,len(hl)):
depth.append(depthtemp[j])
depth.append(depthtemp[j])
vs.append(vstemp[j-1]/1000.)
vs.append(vstemp[j]/1000.)
if not novp:
vp.append(vptemp[j-1]/1000.)
vp.append(vptemp[j]/1000.)
depth.append(depth[-1]+0.1*depth[-1])
vs.append(vs[-1])
if not novp:
vp.append(vp[-1])
if i==len(inp)-1:
a2.plot(vp,depth,color=cclist[i],linestyle=':',lw=3.,label='vp')
a2.plot(vs,depth,color=cclist[i],lw=3.,label='vs')
else:
a2.plot(vp,depth,color=cclist[i],linestyle=':',lw=3.)
a2.plot(vs,depth,color=cclist[i],lw=3.)
xmin.append(np.min(vs))
xmax.append(np.max(vp))
else:
if i==len(inp)-1:
a2.plot(vs,depth,color=cclist[i],lw=3.,label='vs')
else:
a2.plot(vs,depth,color=cclist[i],lw=3.)
xmin.append(np.min(vs))
xmax.append(np.max(vs))
a2.set_ylim(np.min(depth),np.max(depth))
a2.set_xlim(min(xmin)-0.05,max(xmax)+0.05)
plt.xticks(rotation='vertical')
a2.legend(loc='best',fancybox=True,framealpha=0.5)
a2.invert_yaxis()
a2.grid(True)
a2.set_xlabel('Velocity (km/s)')
a2.set_ylabel('Depth (m)')
a2.set_title('Velocity profile')
def TFPlot(*arg,**kwargs):
# check tfid plot
try:
tfid = kwargs['tfid']
except:
tfid = 0
# check given axis name
try:
axname = kwargs['axname']
except KeyError:
axname = None
try:
label = kwargs['label']
except KeyError:
label = ['']*(len(arg))
# create new figure is axis name is not given
if axname==None:
f = plt.figure(figsize=(10.,5.),dpi=300)
# create default color cycle
cclist = colorcycle(len(arg))
a2= f.add_subplot(1,5,5)
velocityprofileplot(arg,a2,cclist)
a = f.add_subplot(1,5,(1,4))
else:
a = axname
# set label and properties of axis
a.set_xlabel('Frequency (Hz)')
a.set_ylabel('Amplification')
a.set_yscale('log')
a.set_xscale('log')
a.grid(True,which='major',color='k')
a.grid(True,which='minor',color='grey')
a.minorticks_on()
a.tick_params(axis='both', which='major', labelsize=11, labelcolor='k')
a.tick_params(axis='both', which='minor', labelsize=10, labelcolor='grey')
for axis in [a.xaxis]:
axis.set_major_formatter(FuncFormatter(majortickformat))
axis.set_minor_formatter(FuncFormatter(minortickformat))
# check number of input
#if len(arg)%2!=0:
# raise InputError('Number or input pairs if not correct! Detected input pairs is %.1f.'%len(arg)/2.)
# check length of each pairs and plot data
minx = []; maxx = []; miny = []; maxy = []
for i in range(len(arg)):
freq = arg[i].freq
tf = arg[i].tf
a.plot(freq,np.abs(tf[tfid]),color=cclist[i],label=label[i])
minx.append(np.min(arg[i].freq))
maxx.append(np.max(arg[i].freq))
miny.append(np.min(np.abs(arg[i].tf[tfid])))
maxy.append(np.max(np.abs(arg[i].tf[tfid])))
minmax = [[np.min(minx),np.max(maxx)],
[np.min(miny),np.max(maxy)]]
percspan = 0.1
minmaxspan = [np.exp(np.log(minmax[0][1]-minmax[0][0])*percspan),
np.exp(np.log(minmax[1][1]-minmax[1][0])*percspan)]
a.set_xlim(minmax[0][0],minmax[0][1])
a.set_ylim(minmax[1][0]-minmaxspan[1],minmax[1][1]+minmaxspan[1])
a.legend(loc='best',fancybox=True,framealpha=0.5)
f.tight_layout()
def PhasePlot(*arg,**kwargs):
# check tfid plot
try:
tfid = kwargs['tfid']
except:
tfid = 0
# check given axis name
try:
axname = kwargs['axname']
except KeyError:
axname = None
try:
label = kwargs['label']
except KeyError:
label = ['']*(len(arg))
# create new figure is axis name is not given
if axname==None:
f = plt.figure(figsize=(10.,5.),dpi=300)
# create default color cycle
cclist = colorcycle(len(arg))
a2= f.add_subplot(1,5,5)
velocityprofileplot(arg,a2,cclist)
a = f.add_subplot(1,5,(1,4))
else:
a = axname
# set label and properties of axis
a.set_xlabel('Frequency (Hz)')
a.set_ylabel('Phase (Rad)')
#a.set_yscale('log')
a.set_xscale('log')
a.grid(True,which='major',color='k')
a.grid(True,which='minor',color='grey')
#a.minorticks_on()
a.tick_params(axis='both', which='major', labelsize=11, labelcolor='k')
#a.tick_params(axis='both', which='minor', labelsize=10, labelcolor='grey')
for axis in [a.xaxis]:
axis.set_major_formatter(FuncFormatter(majortickformat))
axis.set_minor_formatter(FuncFormatter(minortickformat))
# check length of each pairs and plot data
minx = []; maxx = []; miny = []; maxy = []
for i in range(len(arg)):
freq = arg[i].freq
tf = arg[i].tf
y = np.angle(tf[tfid])
a.plot(freq,y,label=label[i],color=cclist[i])
minx.append(np.min(arg[i].freq))
maxx.append(np.max(arg[i].freq))
miny.append(np.min(y))
maxy.append(np.max(y))
minmax = [[np.min(minx),np.max(maxx)],
[np.min(miny),np.max(maxy)]]
percspan = 0.1
minmaxspan = [np.exp(np.log(minmax[0][1]-minmax[0][0])*percspan),
(minmax[1][1]-minmax[1][0])*percspan]
a.set_xlim(minmax[0][0],minmax[0][1])
a.set_ylim(minmax[1][0]-minmaxspan[1],minmax[1][1]+minmaxspan[1])
a.legend(loc='best',fancybox=True,framealpha=0.5)
f.tight_layout()
def SpectroPlot(data,nx=100,ny=100,ylabel='incidence angle',zlabel='Amplification',yscale='lin',cmap='rainbow'):
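    # Interpolate the scattered sensitivity samples (data['x'] = frequency,
    # data['y'] = varied parameter, data['z'] = amplification) onto an nx-by-ny
    # grid that is log-spaced in frequency, then render them as a log-normalised
    # image next to a velocity-profile panel.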
import scipy.interpolate
from matplotlib.colors import LogNorm
from matplotlib.ticker import MaxNLocator
if data['sensitivity']==False:
raise IOError("Sensitivity calculation hasn't been performed! Please do so.")
x = np.asarray(data['x'])
y = np.asarray(data['y'])
z = np.asarray(data['z'])
xi,yi = np.logspace(np.log10(x.min()),np.log10(x.max()),nx), np.linspace(y.min(),y.max(),ny)
xi,yi = np.meshgrid(xi,yi)
# interpolation
zi = scipy.interpolate.griddata((x,y),z,(xi,yi),method='linear')
# plot the data
f = plt.figure(figsize=(10.,5.),dpi=300)
# plot velocity profile
a2= f.add_subplot(1,5,5)
if type(data['hl'][0])==float:
xmin = []
xmax = []
# plot vp
if data['modeID']>=5:
if type(data['vp'][0])==list:
lvp = len(data['vp'][0])
clistvp = cm.cool(np.arange(lvp))
for i in range(len(data['vp'][0])):
vptemp = np.concatenate(([data['vp'][0][i]],data['vp'][1:]))
depthtemp = np.concatenate(([0.],np.cumsum(data['hl'])))
depth = [0.]
vp = [vptemp[0]/1000.]
for j in range(1,len(depthtemp)-1):
depth.append(depthtemp[j])
vp.append(vptemp[j-1]/1000.)
depth.append(depthtemp[j])
vp.append(vptemp[j]/1000.)
depth.append(depth[-1]+0.1*depth[-1])
vp.append(vp[-1])
a2.plot(vp,depth,color=clistvp[i])
xmax.append(np.max(vp))
else:
vptemp = data['vp']
depthtemp = np.concatenate(([0.],np.cumsum(data['hl'])))
depth = [0.]
vp = [vptemp[0]/1000.]
for j in range(1,len(depthtemp)-1):
depth.append(depthtemp[j])
vp.append(vptemp[j-1]/1000.)
depth.append(depthtemp[j])
vp.append(vptemp[j]/1000.)
depth.append(depth[-1]+0.1*depth[-1])
vp.append(vp[-1])
a2.plot(vp,depth,color='b')
xmax.append(np.max(vp))
# plot vs
if type(data['vs'][0])==list:
lvs = len(data['vs'][0])
clistvs = cm.hot(np.arange(lvs))
for i in range(len(data['vs'][0])):
vstemp = np.concatenate(([data['vs'][0][i]],data['vs'][1:]))
depthtemp = np.concatenate(([0.],np.cumsum(data['hl'])))
depth = [0.]
vs = [vstemp[0]/1000.]
for j in range(1,len(depthtemp)-1):
depth.append(depthtemp[j])
vs.append(vstemp[j-1]/1000.)
depth.append(depthtemp[j])
vs.append(vstemp[j]/1000.)
depth.append(depth[-1]+0.1*depth[-1])
vs.append(vs[-1])
a2.plot(vs,depth,color=clistvs[i])
xmin.append(np.min(vs))
xmax.append(np.max(vs))
else:
vstemp = data['vs']
depthtemp = np.concatenate(([0.],np.cumsum(data['hl'])))
depth = [0.]
vs = [vstemp[0]/1000.]
for j in range(1,len(depthtemp)-1):
depth.append(depthtemp[j])
vs.append(vstemp[j-1]/1000.)
depth.append(depthtemp[j])
vs.append(vstemp[j]/1000.)
depth.append(depth[-1]+0.1*depth[-1])
vs.append(vs[-1])
a2.plot(vs,depth,color='r')
xmin.append(np.min(vs))
xmax.append(np.max(vs))
a2.set_xlim(np.min(xmin)-0.05,np.max(xmax)+0.05)
a2.set_ylim(np.min(depth),np.max(depth))
else:
if data['modeID']>=5:
ld = len(data['hl'][0])
clistvp = cm.cool(np.arange(ld))
clistvs = cm.hot(np.arange(ld))
for i in range(len(data['hl'][0])):
hl = np.concatenate(([data['hl'][0][i]],data['hl'][1:]))
depthtemp = np.concatenate(([0.],np.cumsum(hl)))
vptemp = data['vp']
vstemp = data['vs']
depth = [0.]
vs = [vstemp[0]/1000.]
vp = [vptemp[0]/1000.]
for j in range(1,len(hl)):
depth.append(depthtemp[j])
vs.append(vstemp[j-1]/1000.)
vp.append(vptemp[j-1]/1000.)
depth.append(depthtemp[j])
vs.append(vstemp[j]/1000.)
vp.append(vptemp[j]/1000.)
depth.append(depth[-1]+0.1*depth[-1])
vs.append(vs[-1])
vp.append(vp[-1])
a2.plot(vp,depth,color=clistvp[i])
a2.plot(vs,depth,color=clistvs[i])
a2.set_xlim(np.min(vs)-0.05,np.max(vp)+0.05)
a2.set_ylim(np.min(depth),np.max(depth))
else:
ld = len(data['hl'][0])
clistvs = cm.hot(np.arange(ld))
for i in range(len(data['hl'][0])):
hl = np.concatenate(([data['hl'][0][i]],data['hl'][1:]))
vstemp = data['vs']
depthtemp = np.concatenate(([0.],np.cumsum(hl)))
depth = [0.]
vs =[vstemp[0]/1000.]
for j in range(1,len(hl)):
depth.append(depthtemp[j])
vs.append(vstemp[j-1]/1000.)
depth.append(depthtemp[j])
vs.append(vstemp[j]/1000.)
depth.append(depth[-1]+0.1*depth[-1])
vs.append(vs[-1])
a2.plot(vs,depth,color=clistvs[i])
a2.set_xlim(np.min(vs)-0.05,np.max(vs)+0.05)
a2.set_ylim(np.min(depth),np.max(depth))
a2.invert_yaxis()
a2.set_xlabel('Velocity (km/s)')
a2.set_ylabel('Depth (m)')
a2.set_title('Velocity profile')
plt.xticks(rotation='vertical')
# plot data
a = f.add_subplot(1,5,(1,4))
#zi = np.log10(zi)
#z = np.log10(z)
am = a.imshow(zi, vmin=0.1, vmax=z.max(), origin='lower', extent=[x.min(), x.max(), y.min(), y.max()],
aspect = 'auto',norm=LogNorm())
a.set_xlabel('Frequency (Hz)')
a.set_ylabel(ylabel)
a.set_xscale('log')
if yscale=='log':
        print(yscale)
a.set_yscale('log')
a.minorticks_on()
a.tick_params(axis='x', which='major', labelsize=11, labelcolor='k')
a.tick_params(axis='x', which='minor', labelsize=10, labelcolor='grey')
for axis in [a.xaxis]:
axis.set_major_formatter(FuncFormatter(majortickformat))
axis.set_minor_formatter(FuncFormatter(minortickformat))
cb = plt.colorbar(am,label=zlabel)
cb.locator = MaxNLocator(10)
# cb.formatter = ScalarFormatter()
cb.formatter = FuncFormatter(cbtickformat)
cb.update_ticks()
f.tight_layout()
"""
# test display
import IOfile
from TFCalculator import TFCalculator as TFC
from sensitivityTools import sensitivityTools as sT
fname = 'sampleinput_linear_elastic_6layer_halfspace.dat'
data = IOfile.parsing_input_file(fname)
theclass = TFC(data)
tf1 = theclass.tf_kramer286_sh()
fname2 = 'sampleinput_linear_elastic_1layer_halfspace.dat'
data2 = IOfile.parsing_input_file(fname2)
theclass2 = TFC(data2)
tf2 = theclass2.tf_kramer286_sh()
TFPlot(theclass,theclass2,label=['six layers','one layer'])
PhasePlot(theclass,theclass2)
"""
"""
fname3 = 'sampleinput_linear_elastic_1layer_halfspace_adv.dat'
data3 = IOfile.parsing_input_file(fname2)
x = np.array([])
y = np.array([])
z = np.array([])
ianglist = np.linspace(0.0,90.0,91)
for i in range(len(ianglist)):
data3['iang'] = np.deg2rad(ianglist[i])
theclass3 = TFC(data3)
tf3 = theclass3.tf_knopoff_sh_adv()
#print data3['iang']
#print np.shape(np.abs(tf3[0][:,0])),np.shape(z)
x = np.concatenate((x,theclass3.freq))
z = np.concatenate((z,np.abs(tf3[0])))
y = np.concatenate((y,np.zeros_like(theclass3.freq)+ianglist[i]))
SpectroPlot(x,y,z,nx=100,ny=100,ylabel='incidence angle',zlabel='Amplification',cmap='rainbow')
"""
| gpl-2.0 |
craigcitro/pydatalab | tests/ml/facets_tests.py | 4 | 2106 | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
import pandas as pd
from google.datalab.ml import FacetsOverview, FacetsDiveview
class TestFacets(unittest.TestCase):
"""Tests facets visualization components."""
def _create_test_data(self):
data1 = [
{'num1': 1.2, 'weekday': 'Monday', 'occupation': 'software engineer'},
{'num1': 3.2, 'weekday': 'Tuesday', 'occupation': 'medical doctor'},
]
data2 = [
{'num1': -2.8, 'weekday': 'Friday', 'occupation': 'musician'},
]
data1 = pd.DataFrame(data1)
data2 = pd.DataFrame(data2)
return data1, data2
def test_overview_plot(self):
"""Tests overview."""
data1, data2 = self._create_test_data()
output = FacetsOverview().plot({'data1': data1, 'data2': data2})
# Output is an html. Ideally we can parse the html and verify nodes, but since the html
# is output by a polymer component which is tested separately, we just verify
    # minimum keywords.
self.assertIn("facets-overview", output)
self.assertIn("<script>", output)
def test_dive_plot(self):
"""Tests diveview."""
data1, _ = self._create_test_data()
output = FacetsDiveview().plot(data1)
# Output is an html. Ideally we can parse the html and verify nodes, but since the html
# is output by a polymer component which is tested separately, we just verify
    # minimum keywords.
self.assertIn("facets-dive", output)
self.assertIn("<script>", output)
| apache-2.0 |
ch3ll0v3k/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 105 | 22788 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Tests that precomputed metric returns pointer to, and not copy of, X.
S = np.dot(X, X.T)
S2 = pairwise_distances(S, metric="precomputed")
assert_true(S is S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow a callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when the data
        # has been previously normalized by the L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
jhbradley/moose | scripts/git_commit_history.py | 18 | 9854 | #!/usr/bin/env python
import sys
import subprocess
import datetime
import re
import numpy
import matplotlib.pyplot as plt
import multiprocessing
import argparse
import itertools
import os
# A helper function for running git commands
def run(*args, **kwargs):
options = kwargs.pop("options", None)
if options:
loc = os.getenv('MOOSE_DIR', os.path.join(os.getenv('HOME'), 'projects', 'moose'))
if options.framework:
loc = os.path.join(loc, 'framework')
elif options.modules:
loc = os.path.join(loc, 'modules')
args += ('--', loc)
output, _ = subprocess.Popen(args, stdout = subprocess.PIPE).communicate()
if kwargs.pop('split',True):
return filter(None, output.split('\n'))
else:
return output
# Return the list of contributors, sorted by the number of contributions
def getContributors(options, **kwargs):
# Get the number of authors
num_authors = kwargs.pop('authors', options.authors)
# Extract the authors and total number of commits
log = run('git', 'shortlog', '-s', '--no-merges', options=options)
authors = []
commits = []
for row in log:
r = row.split('\t')
commits.append(int(r[0]))
authors.append(r[1])
# Return the authors sorted by commit count
contributors = [x for (y,x) in sorted(zip(commits, authors), reverse=True)]
# Limit to the supplied number of authors
n = len(contributors)
if num_authors == 'moose':
contributors = ['Derek Gaston', 'Cody Permann', 'David Andrs', 'John W. Peterson', 'Andrew E. Slaughter']
contributors += ['Other (' + str(n-len(contributors)) + ')']
elif num_authors:
num_authors = int(num_authors)
contributors = contributors[0:num_authors]
contributors += ['Other (' + str(n-num_authors) + ')']
return contributors
# Return the date and contribution date
def getData(options):
# Build a list of contributors
contributors = getContributors(options)
# Flag for lumping into two categories MOOSE developers and non-moose developers
dev = options.moose_dev
if dev:
moose_developers = contributors[0:-1]
contributors = ['MOOSE developers (' + str(len(contributors)-1) + ')', contributors[-1]]
# Build a list of unique dates
all_dates = sorted(set(run('git', 'log', '--reverse', '--format=%ad', '--date=short', options=options)))
d1 = datetime.datetime.strptime(all_dates[0], '%Y-%m-%d')
d2 = datetime.datetime.strptime(all_dates[-1], '%Y-%m-%d')
dates = [d1 + datetime.timedelta(days=x) for x in range(0, (d2-d1).days, options.days)]
# Build the data arrays, filled with zeros
N = numpy.zeros((len(contributors), len(dates)), dtype=int)
data = {'commits' : numpy.zeros((len(contributors), len(dates)), dtype=int),
'in' : numpy.zeros((len(contributors), len(dates)), dtype=int),
'out' : numpy.zeros((len(contributors), len(dates)), dtype=int)}
contrib = numpy.zeros(len(dates), dtype=int)
all_contributors = getContributors(options, authors=None)
unique_contributors = []
# Get the additions/deletions
commits = run('git', 'log', '--format=%H\n%ad\n%aN', '--date=short', '--no-merges', '--reverse', '--shortstat', split=False, options=options)
commits = filter(None, re.split(r'[0-9a-z]{40}', commits))
# Loop over commits
for commit in commits:
c = filter(None, commit.split('\n'))
date = datetime.datetime.strptime(c[0], '%Y-%m-%d')
author = c[1]
if dev and author in moose_developers:
author = contributors[0]
elif author not in contributors:
author = contributors[-1]
i = contributors.index(author) # author index
d = filter(lambda x: x > date, dates)
if d:
j = dates.index(d[0])
else:
j = dates.index(dates[-1])
data['commits'][i,j] += 1
if options.additions and len(c) == 3:
a = c[2].split()
n = len(a)
files = int(a[0])
if n == 5:
if a[4].startswith('insertion'):
plus = int(a[3])
minus = 0
else:
minus = int(a[3])
plus = 0
else:
plus = int(a[3])
minus = int(a[5])
data['in'][i,j] += plus
data['out'][i,j] += minus
# Count unique contributions
unique_author_index = all_contributors.index(c[1])
unique_author = all_contributors[unique_author_index]
if unique_author not in unique_contributors:
unique_contributors.append(unique_author)
contrib[j] += 1
# Perform cumulative summations
data['commits'] = numpy.cumsum(data['commits'], axis=1)
contrib = numpy.cumsum(contrib)
# Return the data
return dates, data, contrib, contributors
# MAIN
if __name__ == '__main__':
# Command-line options
parser = argparse.ArgumentParser(description="Tool for building commit history of a git repository")
parser.add_argument('--additions', action='store_true', help='Show additions/deletions graph')
parser.add_argument('--days', type=int, default=1, help='The number of days to lump data (e.g., use 7 for weekly data)')
parser.add_argument('--disable-legend', action='store_true', help='Disable display of legend')
parser.add_argument('--stack', '-s', action='store_true', help='Show graph as stacked area instead of line plot')
parser.add_argument('--unique', '-u', action='store_true', help='Show unique contributor on secondary axis')
parser.add_argument('--open-source', '-r', action='store_true', help='Show shaded region for open sourcing of MOOSE')
parser.add_argument('--pdf', '--file', '-f', action='store_true', help='Write the plot to a pdf file (see --output)')
    parser.add_argument('--output', '-o', type=str, default='commit_history.pdf', help='The filename for writing the plot to a file')
    parser.add_argument('--authors', default=None, help='Limit the graph to the given number of authors, or use "moose" to limit to MOOSE developers')
parser.add_argument('--moose-dev', action='store_true', help='Create two categories: MOOSE developers and other (this overrides --authors)')
parser.add_argument('--framework', action='store_true', help='Limit the analysis to framework directory')
parser.add_argument('--modules', action='store_true', help='Limit the analysis to modules directory')
parser.add_argument('--font', default=12, help='The font-size, in points')
options = parser.parse_args()
# Markers/colors
marker = itertools.cycle(('o', 'v', 's', 'd'))
color = itertools.cycle(('g', 'r', 'b', 'c', 'm', 'y', 'k'))
# Setup authors defaults for various cases
if options.moose_dev and options.authors:
        raise Exception("Cannot specify both --authors and --moose-dev")
elif options.moose_dev:
options.authors = 'moose'
# Error if both --framework and --modules are given
if options.framework and options.modules:
        raise Exception("Cannot specify both --framework and --modules")
# Extract the data
dates, data, contrib, contributors = getData(options)
# Create the figure
fig, ax1 = plt.subplots()
for tick in ax1.yaxis.get_ticklabels():
tick.set_fontsize(options.font)
for tick in ax1.xaxis.get_ticklabels():
tick.set_fontsize(options.font)
# Show unique contributors
if options.unique:
ax2 = ax1.twinx()
ax2.plot(dates, contrib, linewidth=4, linestyle='-', color='k')
ax2.set_ylabel('Unique Contributors', color='k', fontsize=options.font)
for tick in ax2.yaxis.get_ticklabels():
tick.set_fontsize(options.font)
for tick in ax2.xaxis.get_ticklabels():
tick.set_fontsize(options.font)
arrow = dict(arrowstyle="-|>", connectionstyle="arc3,rad=0.3", fc="w")
i = int(len(dates)*0.75)
c = int(contrib[-1]*0.75)
ax2.annotate('Unique Contributors', xy=(dates[i], contrib[i]), xytext=(datetime.date(2014,1,1), c), ha='right', size=options.font, arrowprops=arrow)
# labels
y_label = 'Commits'
# Plot the data
if options.stack: # stack plot
handles = plt.stackplot(dates, data['commits'])
for i in range(len(handles)):
handles[i].set_label(contributors[i])
elif options.additions: #additions/deletions plot
y_label = 'Additions / Deletions'
for i in range(len(contributors)):
x = numpy.array(dates)
y = data['in'][i,:]
label = contributors[i] + '(Additions)'
clr = color.next()
ax1.fill_between(x, 0, y, label=label, linewidth=2, edgecolor=clr, facecolor=clr, alpha=0.5)
ax1.plot([], [], color=clr, label=label) # legend proxy
y = -data['out'][i,:]
label = contributors[i] + '(Deletions)'
clr = color.next()
ax1.fill_between(x, 0, y, label=label, linewidth=2, edgecolor=clr, facecolor=clr, alpha=0.5)
ax1.plot([], [], color=clr, label=label) # legend proxy
if not options.disable_legend:
handles, labels = ax1.get_legend_handles_labels()
lgnd = plt.legend(handles, labels, loc='upper left', fontsize=options.font)
lgnd.draw_frame(False)
else: # line plot
handles = []
for i in range(len(contributors)):
x = numpy.array(dates)
y = data['commits'][i,:]
idx = y>0
h = ax1.plot(x[idx], y[idx], label=contributors[i], linewidth=2, markevery=60, marker=marker.next(), color=color.next())
handles.append(h[0])
if not options.disable_legend:
lgnd = plt.legend(handles, contributors, loc='upper left', fontsize=options.font)
lgnd.draw_frame(False)
# Add labels
ax1.set_ylabel(y_label, fontsize=options.font)
ax1.set_xlabel('Date', fontsize=options.font)
# Show open-source region
if options.open_source:
        open_source_date = datetime.date(2014, 3, 10)
        y_lim = plt.ylim()
        delta = plt.xlim()[1] - open_source_date.toordinal()
        plt.gca().add_patch(plt.Rectangle((open_source_date, y_lim[0]), delta, y_lim[1]-y_lim[0], facecolor='green', alpha=0.2))
# Write to a file
if options.pdf:
fig.savefig(options.output)
plt.tight_layout()
plt.show()
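    # Example invocation (illustrative only; the option values below are
    # hypothetical and not taken from any MOOSE documentation):
    #
    #     ./git_commit_history.py --days 7 --authors 5 --unique --pdf -o history.pdf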
| lgpl-2.1 |
wlamond/scikit-learn | sklearn/kernel_ridge.py | 48 | 6731 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Representation of weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
if sample_weight is not None and not isinstance(sample_weight, float):
sample_weight = check_array(sample_weight, ensure_2d=False)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
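    # Background note on the closed-form fit above: with kernel matrix K and
    # ridge penalty alpha, _solve_cholesky_kernel solves
    #     (K + alpha * I) dual_coef = y,
    # so predictions are K(X_new, X_fit_) @ dual_coef_. This is the standard
    # kernel ridge dual solution, stated here for context rather than as API.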
def predict(self, X):
"""Predict using the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
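# A minimal usage sketch (illustrative; it mirrors the data set up in the class
# docstring rather than any real application):
#
#     krr = KernelRidge(alpha=1.0, kernel="rbf", gamma=0.1)
#     krr.fit(X, y)
#     y_pred = krr.predict(X)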
| bsd-3-clause |
balazssimon/ml-playground | udemy/lazyprogrammer/reinforcement-learning-python/td0_prediction.py | 1 | 2303 | import numpy as np
import matplotlib.pyplot as plt
from grid_world import standard_grid, negative_grid
from iterative_policy_evaluation import print_values, print_policy
SMALL_ENOUGH = 1e-3
GAMMA = 0.9
ALPHA = 0.1
ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R')
# NOTE: this is only policy evaluation, not optimization
def random_action(a, eps=0.1):
# we'll use epsilon-soft to ensure all states are visited
# what happens if you don't do this? i.e. eps=0
p = np.random.random()
if p < (1 - eps):
return a
else:
return np.random.choice(ALL_POSSIBLE_ACTIONS)
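# Note: with eps = 0.1 the behaviour policy above is epsilon-soft -- it returns
# the greedy action with probability 1 - eps + eps/|A| and every other action
# with probability eps/|A|, so all actions keep being explored.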
def play_game(grid, policy):
# returns a list of states and corresponding rewards (not returns as in MC)
# start at the designated start state
s = (2, 0)
grid.set_state(s)
states_and_rewards = [(s, 0)] # list of tuples of (state, reward)
while not grid.game_over():
a = policy[s]
a = random_action(a)
r = grid.move(a)
s = grid.current_state()
states_and_rewards.append((s, r))
return states_and_rewards
if __name__ == '__main__':
# use the standard grid again (0 for every step) so that we can compare
# to iterative policy evaluation
grid = standard_grid()
# print rewards
print("rewards:")
print_values(grid.rewards, grid)
# state -> action
policy = {
(2, 0): 'U',
(1, 0): 'U',
(0, 0): 'R',
(0, 1): 'R',
(0, 2): 'R',
(1, 2): 'R',
(2, 1): 'R',
(2, 2): 'R',
(2, 3): 'U',
}
# initialize V(s) and returns
V = {}
states = grid.all_states()
for s in states:
V[s] = 0
# repeat until convergence
for it in range(1000):
# generate an episode using pi
states_and_rewards = play_game(grid, policy)
# the first (s, r) tuple is the state we start in and 0
# (since we don't get a reward) for simply starting the game
# the last (s, r) tuple is the terminal state and the final reward
# the value for the terminal state is by definition 0, so we don't
# care about updating it.
for t in range(len(states_and_rewards) - 1):
s, _ = states_and_rewards[t]
s2, r = states_and_rewards[t+1]
# we will update V(s) AS we experience the episode
V[s] = V[s] + ALPHA*(r + GAMMA*V[s2] - V[s])
print("values:")
print_values(V, grid)
print("policy:")
print_policy(policy, grid)
| apache-2.0 |
belltailjp/scikit-learn | sklearn/lda.py | 56 | 17706 | """
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = sc.std_ * ledoit_wolf(X)[0] * sc.std_ # scale back
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
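# A minimal sketch of how _cov is meant to be called (illustrative data only):
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(20, 3)
#     _cov(X)                    # empirical covariance
#     _cov(X, shrinkage='auto')  # Ledoit-Wolf shrinkage
#     _cov(X, shrinkage=0.5)     # fixed shrinkage in [0, 1]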
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
        Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
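        # Background for the two assignments above: with shared covariance S and
        # class means m_k, the LDA discriminant is linear,
        #     delta_k(x) = x' S^{-1} m_k - 0.5 * m_k' S^{-1} m_k + log(pi_k),
        # so coef_ holds S^{-1} m_k (found here via least squares) and
        # intercept_ holds the remaining two terms.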
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
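        # Background: linalg.eigh(Sb, Sw) above solves the generalized
        # eigenproblem Sb v = lambda Sw v; the leading eigenvectors maximize the
        # between/within scatter ratio and serve as the discriminant axes.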
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within-class (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
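        # Summary of the steps above: the data are whitened with the pooled
        # within-class SVD (steps 1-2), then a second SVD of the whitened class
        # centers (step 3) gives scalings_, so the discriminant remains linear
        # in (x - xbar_) without forming the covariance matrix explicitly.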
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
            warnings.warn("'store_covariance' was moved to the __init__() "
                          "method in version 0.16 and will be removed from "
                          "fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
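        # Note: decision_function returns the log-odds for binary LDA, so the
        # code above is a plain logistic transform 1 / (1 + exp(-decision));
        # in the multiclass case the per-class sigmoids are renormalized (OvR).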
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
| bsd-3-clause |
blancha/abcngspipelines | exomeseq/indelrealigner.py | 1 | 3024 | #!/usr/bin/env python3
# Version 1.1
# Author Alexis Blanchet-Cohen
# Date: 09/06/2014
import argparse
import glob
import os
import os.path
import pandas
import subprocess
import util
# Read the command line arguments.
parser = argparse.ArgumentParser(description="Generates GATK IndelRealigner scripts.")
parser.add_argument("-s", "--scriptsDirectory", help="Scripts directory. DEFAULT=indelrealigner", default="indelrealigner")
parser.add_argument("-i", "--inputDirectory", help="Input directory with BAM files. DEFAULT=../results/bwa", default="../results/bwa")
parser.add_argument("-o", "--outputDirectory", help="Output directory with realigned BAM files. DEFAULT=../results/bwa", default="../results/bwa")
parser.add_argument("-q", "--submitJobsToQueue", help="Submit jobs to queue immediately.", choices=["yes", "no", "y", "n"], default="no")
args = parser.parse_args()
# If not in the main scripts directory, cd to the main scripts directory, if it exists.
util.cdMainScriptsDirectory()
# Process the command line arguments.
inputDirectory = os.path.abspath(args.inputDirectory)
outputDirectory = os.path.abspath(args.outputDirectory)
scriptsDirectory = os.path.abspath(args.scriptsDirectory)
# Read configuration files
config = util.readConfigurationFiles()
header = config.getboolean("server", "PBS_header")
toolsFolder = config.get("server", "toolsFolder")
genome = config.get("project", "genome")
genomeFolder = config.get(genome, "genomeFolder")
genomeFile = config.get(genome, "genomeFile")
xmx = config.get("indelrealigner", "xmx")
# Get samples
samples = util.getsamples(lanes=True)
# Create scripts directory, if it does not exist yet, and cd to it.
if not os.path.exists(scriptsDirectory):
os.mkdir(scriptsDirectory)
os.chdir(scriptsDirectory)
# Write the scripts
for sample in samples:
# Write the script
scriptName = "indelrealigner_" + sample + ".sh"
script = open(scriptName, "w")
if header:
util.writeHeader(script, config, "indelrealigner")
script.write("java -Xmx" + xmx + " \\\n")
script.write("-jar " + os.path.join(toolsFolder, "GenomeAnalysisTK.jar") + " \\\n")
script.write("--analysis_type IndelRealigner" + " \\\n")
script.write("--reference_sequence " + genomeFile + " \\\n")
script.write("--targetIntervals ../realignertargetcreator/target_intervals.list" + " \\\n")
script.write("--input_file " + os.path.join(inputDirectory, sample, sample + "_deduplicated.bam") + " \\\n")
script.write("--knownAlleles " + os.path.join(genomeFolder, "1000G_phase1.indels.b37.vcf") + " \\\n")
script.write("--knownAlleles " + os.path.join(genomeFolder, "Mills_and_1000G_gold_standard.indels.b37.vcf") + " \\\n")
script.write("--out " + os.path.join(outputDirectory, sample, sample + "_realigned_reads.bam") + " \\\n")
script.write("&> " + scriptName + ".log")
script.close()
if (args.submitJobsToQueue.lower() == "yes") | (args.submitJobsToQueue.lower() == "y"):
subprocess.call("submitJobs.py", shell=True)
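# Example invocation (illustrative only; directory names are hypothetical):
#     ./indelrealigner.py -i ../results/bwa -o ../results/bwa -s indelrealigner -q no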
| gpl-3.0 |
chrisdev/django-pandas | django_pandas/io.py | 1 | 5096 | import pandas as pd
from .utils import update_with_verbose, get_related_model
import django
FieldDoesNotExist = (
django.db.models.fields.FieldDoesNotExist
if django.VERSION < (1, 8)
else django.core.exceptions.FieldDoesNotExist
)
def to_fields(qs, fieldnames):
for fieldname in fieldnames:
model = qs.model
for fieldname_part in fieldname.split('__'):
try:
field = model._meta.get_field(fieldname_part)
except FieldDoesNotExist:
try:
rels = model._meta.get_all_related_objects_with_model()
except AttributeError:
field = fieldname
else:
for relobj, _ in rels:
if relobj.get_accessor_name() == fieldname_part:
field = relobj.field
model = field.model
break
else:
model = get_related_model(field)
yield field
def is_values_queryset(qs):
if django.VERSION < (1, 9): # pragma: no cover
return isinstance(qs, django.db.models.query.ValuesQuerySet)
else:
return qs._iterable_class == django.db.models.query.ValuesIterable
def read_frame(qs, fieldnames=(), index_col=None, coerce_float=False,
verbose=True, datetime_index=False, column_names=None):
"""
Returns a dataframe from a QuerySet
Optionally specify the field names/columns to utilize and
a field as the index
Parameters
----------
qs: The Django QuerySet.
fieldnames: The model field names to use in creating the frame.
You can span a relationship in the usual Django way
by using double underscores to specify a related field
in another model
index_col: specify the field to use for the index. If the index
field is not in the field list it will be appended
coerce_float : boolean, default False
Attempt to convert values to non-string, non-numeric data (like
decimal.Decimal) to floating point, useful for SQL result sets
    verbose: boolean If this is ``True`` then populate the DataFrame with the
              human-readable versions of any foreign key fields, else use
              the primary key values.
The human readable version of the foreign key field is
defined in the ``__unicode__`` or ``__str__``
methods of the related class definition
datetime_index: specify whether index should be converted to a
DateTimeIndex.
    column_names: If not None, use to override the column names in the
                  DataFrame
"""
if fieldnames:
fieldnames = pd.unique(fieldnames)
if index_col is not None and index_col not in fieldnames:
# Add it to the field names if not already there
fieldnames = tuple(fieldnames) + (index_col,)
if column_names:
column_names = tuple(column_names) + (index_col,)
fields = to_fields(qs, fieldnames)
elif is_values_queryset(qs):
if django.VERSION < (1, 9): # pragma: no cover
annotation_field_names = list(qs.query.annotation_select)
if annotation_field_names is None:
annotation_field_names = []
extra_field_names = qs.extra_names
if extra_field_names is None:
extra_field_names = []
select_field_names = qs.field_names
else: # pragma: no cover
annotation_field_names = list(qs.query.annotation_select)
extra_field_names = list(qs.query.extra_select)
select_field_names = list(qs.query.values_select)
fieldnames = select_field_names + annotation_field_names + \
extra_field_names
fields = [None if '__' in f else qs.model._meta.get_field(f)
for f in select_field_names] + \
[None] * (len(annotation_field_names) + len(extra_field_names))
uniq_fields = set()
fieldnames, fields = zip(
*(f for f in zip(fieldnames, fields)
if f[0] not in uniq_fields and not uniq_fields.add(f[0])))
else:
fields = qs.model._meta.fields
fieldnames = [f.name for f in fields]
fieldnames += list(qs.query.annotation_select.keys())
if is_values_queryset(qs):
recs = list(qs)
else:
recs = list(qs.values_list(*fieldnames))
df = pd.DataFrame.from_records(
recs,
columns=column_names if column_names else fieldnames,
coerce_float=coerce_float
)
if verbose:
update_with_verbose(df, fieldnames, fields)
if index_col is not None:
df.set_index(index_col, inplace=True)
if datetime_index:
df.index = pd.to_datetime(df.index, errors="ignore")
return df
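# A minimal usage sketch (MyModel and its field names are hypothetical, shown
# only to illustrate the call signature):
#
#     qs = MyModel.objects.all()
#     df = read_frame(qs, fieldnames=['id', 'name', 'created'],
#                     index_col='created', datetime_index=True)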
| bsd-3-clause |
h2educ/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
khrapovs/multidensity | multidensity/multidensity.py | 1 | 6862 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
Generic Class for Multivariate Distributions
============================================
"""
from __future__ import print_function, division
import numpy as np
import matplotlib.pylab as plt
import seaborn as sns
from scipy.special import gamma
from scipy.optimize import minimize, brentq
from scipy.integrate import nquad
__all__ = ['MultiDensity']
class MultiDensity(object):
"""Multidimensional density.
Attributes
----------
eta : array_like
Degrees of freedom. :math:`2 < \eta < \infty`
lam : array_like
Asymmetry. :math:`0 < \lambda < \infty`
data : array_like
Data grid
Methods
-------
pdf
Probability density function
likelihood
Log-likelihood function
fit_mle
Fit parameters with MLE
"""
def __init__(self, ndim=None, eta=None, lam=None, data=None):
"""Initialize the class.
Parameters
----------
ndim : int
Number of dimensions
eta : array_like
Degrees of freedom
lam : array_like
Asymmetry
data : array_like
Data grid
"""
if ndim is None:
raise ValueError('Please, provide dimension!')
else:
self.ndim = ndim
self.eta = None
self.lam = None
self.data = None
if eta is not None:
self.eta = np.atleast_1d(eta)
if lam is not None:
self.lam = np.atleast_1d(lam)
if data is not None:
self.data = np.atleast_2d(data)
def bounds(self):
"""Parameter bounds."""
return None
def const_a(self):
"""Compute a constant.
Returns
-------
float
"""
return gamma((self.eta - 1) / 2) / gamma(self.eta / 2) \
* ((self.eta - 2) / np.pi) ** .5 * (self.lam - 1. / self.lam)
def const_b(self):
"""Compute b constant.
Returns
-------
float
"""
return (self.lam ** 2 + self.lam ** (-2) - 1 - self.const_a() ** 2)**.5
def pdf(self, data=None):
"""Probability density function (PDF).
Parameters
----------
data : array_like
            Grid of points to evaluate the PDF at.
(k,) - one observation, k dimensions
(T, k) - T observations, k dimensions
Returns
-------
(T, ) array
PDF values
"""
if data is None:
raise ValueError('No data given!')
return np.prod(self.marginals(data), axis=1)
def pdf_vec(self, data=None):
"""Vectorized version of the univariate PDF.
Parameters
----------
data : array_like
            Grid of points to evaluate the PDF at
Returns
-------
array
Univariate PDF values. Same dimension as input.
"""
return np.vectorize(self.pdf)(data)
def pdf_args(self, *args):
"""PDF with ordered argument signature, f(x0,...,xn).
"""
return self.pdf(data=np.array(args))
def likelihood(self, theta=[10., 10, .5, 1.5]):
"""Log-likelihood function.
Parameters
----------
theta : array_like
Density parameters
Returns
-------
float
            Negative average log-likelihood of the data.
"""
if theta is None:
raise ValueError('No parameter given!')
self.from_theta(theta)
try:
return -np.log(self.pdf(self.data)).mean()
except ValueError:
return 1e10
def fit_mle(self, theta_start=None, method='Nelder-Mead'):
"""Fit parameters with MLE.
Parameters
----------
theta_start : array_like
Density parameters
method : str
Optimization method
Returns
-------
        OptimizeResult
            Output of ``scipy.optimize.minimize``, with the fitted parameter
            vector in its ``x`` attribute.
"""
ndim = self.data.shape[1]
if theta_start is None:
theta_start = self.theta_start(ndim)
if self.bounds() is None:
bounds = self.ndim * [(None, None)]
else:
bounds = self.bounds()
return minimize(self.likelihood, theta_start, method=method,
bounds=bounds)
def cdf(self, values):
"""CDF function.
Parameters
----------
values : array_like
Argument of CDF. One for each dimension.
Returns
-------
float
Value of CDF
"""
if isinstance(values, float):
ndim = 1
values = np.array([values])
else:
ndim = len(values)
ranges = list(zip(- np.ones(ndim) * 5, values))
return nquad(self.pdf_args, ranges)[0]
def cdf_vec(self, values):
"""Vectorized version of the CDF.
Parameters
----------
values : array_like
(T, k) argument of CDF. One for each dimension.
Returns
-------
(T, ) array
Value of CDF
"""
return np.vectorize(self.cdf)(values)
def ppf(self, value):
"""Inverse univariate CDF function.
Parameters
----------
value : float
Value of univariate CDF
Returns
-------
float
Quantile for one observation
"""
if len(self.lam) > 1:
raise ValueError('The density object is multivariate.\
Need one dimension!')
return brentq(lambda x: self.cdf(x) - value, -10, 10)
def ppf_vec(self, values):
"""Vectorized version of the Inverse CDF function.
Parameters
----------
values : array_like
Values of CDF
Returns
-------
array
Quantiles at arbitrary points
"""
return np.vectorize(self.ppf)(values)
def copula_density(self, args):
"""Copula density.
Parameters
----------
args : (ndim, ) array
Vector with each element in (0, 1)
Returns
-------
float
            Copula density
"""
        return self.pdf(data=self.ppf_vec(args))
def plot_bidensity(self):
"""Plot bivariate density.
"""
ndots = 100
xgrid = np.linspace(-2, 2, ndots)
ygrid = np.linspace(-2, 2, ndots)
xgrid, ygrid = np.meshgrid(xgrid, ygrid)
data = np.vstack((xgrid.flatten(), ygrid.flatten())).T
zvalues = self.pdf(data).reshape((ndots, ndots))
plt.contourf(xgrid, ygrid, zvalues)
plt.axis('square')
plt.title(self.get_name())
plt.show()
| mit |
catapult-project/catapult | experimental/plot_bisect_results.py | 4 | 4272 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to plot the results of a bisect run."""
import argparse
import json
import math
import re
import urllib2
from matplotlib import cm # pylint: disable=import-error
from matplotlib import pyplot # pylint: disable=import-error
import numpy # pylint: disable=import-error
_PLOT_WIDTH_INCHES = 8
_PLOT_HEIGHT_INCHES = 6
_PERCENTILES = (0, 0.05, 0.25, 0.5, 0.75, 0.95, 1)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('bisect_url_or_debug_info_file',
help='The Buildbot URL of a bisect run, or a file '
'containing the output from the Debug Info step.')
parser.add_argument('output', nargs='?', help='File path to save a PNG to.')
args = parser.parse_args()
url = (args.bisect_url_or_debug_info_file +
'/steps/Debug%20Info/logs/Debug%20Info/text')
try:
f = urllib2.urlopen(url)
except ValueError: # Not a valid URL.
f = open(args.bisect_url_or_debug_info_file, 'r')
results = []
for line in f.readlines():
regex = (r'(?:(?:[a-z0-9-]+@)?[a-z0-9]+,)*'
r'(?:[a-z0-9-]+@)?(?P<commit>[a-z0-9]+)\s*'
r'(?P<values>\[(?:-?[0-9.]+, )*-?[0-9.]*\])')
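    # A line this regex is intended to match looks like the following
    # (constructed from the pattern itself, not copied from real bisect logs):
    #   deadbeef01 [10.2, 10.5, 9.8]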
match = re.match(regex, line)
if not match:
continue
commit = match.group('commit')
values = json.loads(match.group('values'))
if not values:
continue
print commit, values
results.append((commit, values))
_SavePlots(results, args.output)
def _SavePlots(results, file_path=None):
"""Saves histograms and empirial distribution plots showing the diff.
Args:
    results: List of (commit, values) pairs parsed from the bisect output.
    file_path: The location to save the plots to; if None, the plots are only
        shown.
"""
figsize = (_PLOT_WIDTH_INCHES * 2, _PLOT_HEIGHT_INCHES)
_, (axis0, axis1) = pyplot.subplots(nrows=1, ncols=2, figsize=figsize)
_DrawHistogram(axis0, results)
_DrawEmpiricalCdf(axis1, results)
if file_path:
pyplot.savefig(file_path)
pyplot.show()
pyplot.close()
def _DrawHistogram(axis, results):
values_per_commit = [values for _, values in results]
# Calculate bounds and bins.
combined_values = sum(values_per_commit, [])
lower_bound = min(combined_values)
upper_bound = max(combined_values)
if lower_bound == upper_bound:
lower_bound -= 0.5
upper_bound += 0.5
bins = numpy.linspace(lower_bound, upper_bound,
math.log(len(combined_values)) * 4)
# Histograms.
colors = cm.rainbow(numpy.linspace( # pylint: disable=no-member
1, 0, len(results) + 1))
for (commit, values), color in zip(results, colors):
axis.hist(values, bins, alpha=0.5, normed=True, histtype='stepfilled',
label='%s (n=%d)' % (commit, len(values)), color=color)
# Vertical lines denoting the medians.
medians = tuple(numpy.percentile(values, 50) for values in values_per_commit)
axis.set_xticks(medians, minor=True)
axis.grid(which='minor', axis='x', linestyle='--')
# Axis labels and legend.
#axis.set_xlabel(step.metric_name)
axis.set_ylabel('Relative probability')
axis.legend(loc='upper right')
def _DrawEmpiricalCdf(axis, results):
colors = cm.rainbow(numpy.linspace( # pylint: disable=no-member
1, 0, len(results) + 1))
for (commit, values), color in zip(results, colors):
# Empirical distribution function.
levels = numpy.linspace(0, 1, len(values) + 1)
axis.step(sorted(values) + [max(values)], levels,
label='%s (n=%d)' % (commit, len(values)), color=color)
# Dots denoting the percentiles.
axis.plot(numpy.percentile(values, tuple(p * 100 for p in _PERCENTILES)),
_PERCENTILES, '.', color=color)
axis.set_yticks(_PERCENTILES)
# Vertical lines denoting the medians.
values_per_commit = [values for _, values in results]
medians = tuple(numpy.percentile(values, 50) for values in values_per_commit)
axis.set_xticks(medians, minor=True)
axis.grid(which='minor', axis='x', linestyle='--')
# Axis labels and legend.
#axis.set_xlabel(step.metric_name)
axis.set_ylabel('Cumulative probability')
axis.legend(loc='lower right')
if __name__ == '__main__':
main()
| bsd-3-clause |
DmitryOdinoky/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/hpsModel-sax-phrase.py | 24 | 1834 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
import hpsModel as HPS
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/sax-phrase-short.wav'))
w = np.blackman(601)
N = 1024
t = -100
nH = 100
minf0 = 350
maxf0 = 700
f0et = 5
minSineDur = .1
harmDevSlope = 0.01
Ns = 512
H = Ns/4
stocf = .2
hfreq, hmag, hphase, mYst = HPS.hpsModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf)
y, yh, yst = HPS.hpsModelSynth(hfreq, hmag, hphase, mYst, Ns, H, fs)
maxplotfreq = 10000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(311)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.autoscale(tight=True)
plt.title('x (sax-phrase-short.wav)')
plt.subplot(312)
numFrames = int(mYst[:,0].size)
sizeEnv = int(mYst[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(mYst[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('harmonics + stochastic')
plt.subplot(313)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.autoscale(tight=True)
plt.title('y')
plt.tight_layout()
plt.savefig('hpsModel-sax-phrase.png')
UF.wavwrite(y, fs, 'sax-phrase-hps-synthesis.wav')
UF.wavwrite(yh, fs, 'sax-phrase-harmonic.wav')
UF.wavwrite(yst, fs, 'sax-phrase-stochastic.wav')
plt.show()
| agpl-3.0 |
Odingod/mne-python | mne/tests/test_source_space.py | 9 | 23354 | from __future__ import print_function
import os
import os.path as op
from nose.tools import assert_true, assert_raises
from nose.plugins.skip import SkipTest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
import warnings
from mne.datasets import testing
from mne import (read_source_spaces, vertex_to_mni, write_source_spaces,
setup_source_space, setup_volume_source_space,
add_source_space_distances, read_bem_surfaces)
from mne.utils import (_TempDir, requires_fs_or_nibabel, requires_nibabel,
requires_freesurfer, run_subprocess,
requires_mne, requires_scipy_version,
run_tests_if_main, slow_test)
from mne.surface import _accumulate_normals, _triangle_neighbors
from mne.source_space import _get_mgz_header
from mne.externals.six.moves import zip
from mne.source_space import (get_volume_labels_from_aseg, SourceSpaces,
_compare_source_spaces)
from mne.io.constants import FIFF
warnings.simplefilter('always')
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname_mri = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
fname_vol = op.join(subjects_dir, 'sample', 'bem',
'sample-volume-7mm-src.fif')
fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-bem.fif')
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname_small = op.join(base_dir, 'small-src.fif.gz')
@testing.requires_testing_data
@requires_nibabel(vox2ras_tkr=True)
def test_mgz_header():
"""Test MGZ header reading"""
import nibabel as nib
header = _get_mgz_header(fname_mri)
mri_hdr = nib.load(fname_mri).get_header()
assert_allclose(mri_hdr.get_data_shape(), header['dims'])
assert_allclose(mri_hdr.get_vox2ras_tkr(), header['vox2ras_tkr'])
assert_allclose(mri_hdr.get_ras2vox(), header['ras2vox'])
@requires_scipy_version('0.11')
def test_add_patch_info():
"""Test adding patch info to source space"""
# let's setup a small source space
src = read_source_spaces(fname_small)
src_new = read_source_spaces(fname_small)
for s in src_new:
s['nearest'] = None
s['nearest_dist'] = None
s['pinfo'] = None
# test that no patch info is added for small dist_limit
try:
add_source_space_distances(src_new, dist_limit=0.00001)
except RuntimeError: # what we throw when scipy version is wrong
pass
else:
assert_true(all(s['nearest'] is None for s in src_new))
assert_true(all(s['nearest_dist'] is None for s in src_new))
assert_true(all(s['pinfo'] is None for s in src_new))
# now let's use one that works
add_source_space_distances(src_new)
for s1, s2 in zip(src, src_new):
assert_array_equal(s1['nearest'], s2['nearest'])
assert_allclose(s1['nearest_dist'], s2['nearest_dist'], atol=1e-7)
assert_equal(len(s1['pinfo']), len(s2['pinfo']))
for p1, p2 in zip(s1['pinfo'], s2['pinfo']):
assert_array_equal(p1, p2)
@testing.requires_testing_data
@requires_scipy_version('0.11')
def test_add_source_space_distances_limited():
"""Test adding distances to source space with a dist_limit"""
tempdir = _TempDir()
src = read_source_spaces(fname)
src_new = read_source_spaces(fname)
del src_new[0]['dist']
del src_new[1]['dist']
n_do = 200 # limit this for speed
src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
out_name = op.join(tempdir, 'temp-src.fif')
try:
add_source_space_distances(src_new, dist_limit=0.007)
except RuntimeError: # what we throw when scipy version is wrong
raise SkipTest('dist_limit requires scipy > 0.13')
write_source_spaces(out_name, src_new)
src_new = read_source_spaces(out_name)
for so, sn in zip(src, src_new):
assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
assert_array_equal(sn['dist_limit'], np.array([0.007], np.float32))
do = so['dist']
dn = sn['dist']
# clean out distances > 0.007 in C code
do.data[do.data > 0.007] = 0
do.eliminate_zeros()
# make sure we have some comparable distances
assert_true(np.sum(do.data < 0.007) > 400)
# do comparison over the region computed
d = (do - dn)[:sn['vertno'][n_do - 1]][:, :sn['vertno'][n_do - 1]]
assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-6)
@slow_test
@testing.requires_testing_data
@requires_scipy_version('0.11')
def test_add_source_space_distances():
"""Test adding distances to source space"""
tempdir = _TempDir()
src = read_source_spaces(fname)
src_new = read_source_spaces(fname)
del src_new[0]['dist']
del src_new[1]['dist']
n_do = 20 # limit this for speed
src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
out_name = op.join(tempdir, 'temp-src.fif')
add_source_space_distances(src_new)
write_source_spaces(out_name, src_new)
src_new = read_source_spaces(out_name)
# iterate over both hemispheres
for so, sn in zip(src, src_new):
v = so['vertno'][:n_do]
assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
assert_array_equal(sn['dist_limit'], np.array([np.inf], np.float32))
do = so['dist']
dn = sn['dist']
# clean out distances > 0.007 in C code (some residual), and Python
ds = list()
for d in [do, dn]:
d.data[d.data > 0.007] = 0
d = d[v][:, v]
d.eliminate_zeros()
ds.append(d)
# make sure we actually calculated some comparable distances
assert_true(np.sum(ds[0].data < 0.007) > 10)
# do comparison
d = ds[0] - ds[1]
assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-9)
@testing.requires_testing_data
@requires_mne
def test_discrete_source_space():
"""Test setting up (and reading/writing) discrete source spaces
"""
tempdir = _TempDir()
src = read_source_spaces(fname)
v = src[0]['vertno']
# let's make a discrete version with the C code, and with ours
temp_name = op.join(tempdir, 'temp-src.fif')
try:
# save
temp_pos = op.join(tempdir, 'temp-pos.txt')
np.savetxt(temp_pos, np.c_[src[0]['rr'][v], src[0]['nn'][v]])
# let's try the spherical one (no bem or surf supplied)
run_subprocess(['mne_volume_source_space', '--meters',
'--pos', temp_pos, '--src', temp_name])
src_c = read_source_spaces(temp_name)
pos_dict = dict(rr=src[0]['rr'][v], nn=src[0]['nn'][v])
src_new = setup_volume_source_space('sample', None,
pos=pos_dict,
subjects_dir=subjects_dir)
_compare_source_spaces(src_c, src_new, mode='approx')
assert_allclose(src[0]['rr'][v], src_new[0]['rr'],
rtol=1e-3, atol=1e-6)
assert_allclose(src[0]['nn'][v], src_new[0]['nn'],
rtol=1e-3, atol=1e-6)
# now do writing
write_source_spaces(temp_name, src_c)
src_c2 = read_source_spaces(temp_name)
_compare_source_spaces(src_c, src_c2)
# now do MRI
assert_raises(ValueError, setup_volume_source_space, 'sample',
pos=pos_dict, mri=fname_mri)
finally:
if op.isfile(temp_name):
os.remove(temp_name)
@slow_test
@testing.requires_testing_data
def test_volume_source_space():
"""Test setting up volume source spaces
"""
tempdir = _TempDir()
src = read_source_spaces(fname_vol)
temp_name = op.join(tempdir, 'temp-src.fif')
surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
surf['rr'] *= 1e3 # convert to mm
# The one in the testing dataset (uses bem as bounds)
for bem, surf in zip((fname_bem, None), (None, surf)):
src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
bem=bem, surface=surf,
mri=fname_mri,
subjects_dir=subjects_dir)
_compare_source_spaces(src, src_new, mode='approx')
del src_new
src_new = read_source_spaces(temp_name)
_compare_source_spaces(src, src_new, mode='approx')
assert_raises(IOError, setup_volume_source_space, 'sample', temp_name,
pos=7.0, bem=None, surface='foo', # bad surf
mri=fname_mri, subjects_dir=subjects_dir)
@testing.requires_testing_data
@requires_mne
def test_other_volume_source_spaces():
"""Test setting up other volume source spaces"""
# these are split off because they require the MNE tools, and
# Travis doesn't seem to like them
# let's try the spherical one (no bem or surf supplied)
tempdir = _TempDir()
temp_name = op.join(tempdir, 'temp-src.fif')
run_subprocess(['mne_volume_source_space',
'--grid', '7.0',
'--src', temp_name,
'--mri', fname_mri])
src = read_source_spaces(temp_name)
src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
mri=fname_mri,
subjects_dir=subjects_dir)
_compare_source_spaces(src, src_new, mode='approx')
del src
del src_new
assert_raises(ValueError, setup_volume_source_space, 'sample', temp_name,
pos=7.0, sphere=[1., 1.], mri=fname_mri, # bad sphere
subjects_dir=subjects_dir)
# now without MRI argument, it should give an error when we try
# to read it
run_subprocess(['mne_volume_source_space',
'--grid', '7.0',
'--src', temp_name])
assert_raises(ValueError, read_source_spaces, temp_name)
@testing.requires_testing_data
def test_triangle_neighbors():
"""Test efficient vertex neighboring triangles for surfaces"""
this = read_source_spaces(fname)[0]
this['neighbor_tri'] = [list() for _ in range(this['np'])]
for p in range(this['ntri']):
verts = this['tris'][p]
this['neighbor_tri'][verts[0]].append(p)
this['neighbor_tri'][verts[1]].append(p)
this['neighbor_tri'][verts[2]].append(p)
this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]
neighbor_tri = _triangle_neighbors(this['tris'], this['np'])
    assert_true(all(np.array_equal(nt1, nt2)
                    for nt1, nt2 in zip(neighbor_tri, this['neighbor_tri'])))
def test_accumulate_normals():
"""Test efficient normal accumulation for surfaces"""
# set up comparison
rng = np.random.RandomState(0)
n_pts = int(1.6e5) # approx number in sample source space
n_tris = int(3.2e5)
# use all positive to make a worst-case for cumulative summation
# (real "nn" vectors will have both positive and negative values)
tris = (rng.rand(n_tris, 1) * (n_pts - 2)).astype(int)
tris = np.c_[tris, tris + 1, tris + 2]
tri_nn = rng.rand(n_tris, 3)
this = dict(tris=tris, np=n_pts, ntri=n_tris, tri_nn=tri_nn)
# cut-and-paste from original code in surface.py:
# Find neighboring triangles and accumulate vertex normals
this['nn'] = np.zeros((this['np'], 3))
for p in range(this['ntri']):
# vertex normals
verts = this['tris'][p]
this['nn'][verts, :] += this['tri_nn'][p, :]
nn = _accumulate_normals(this['tris'], this['tri_nn'], this['np'])
# the moment of truth (or reckoning)
assert_allclose(nn, this['nn'], rtol=1e-7, atol=1e-7)
@slow_test
@testing.requires_testing_data
def test_setup_source_space():
"""Test setting up ico, oct, and all source spaces
"""
tempdir = _TempDir()
fname_ico = op.join(data_path, 'subjects', 'fsaverage', 'bem',
'fsaverage-ico-5-src.fif')
# first lets test some input params
assert_raises(ValueError, setup_source_space, 'sample', spacing='oct',
add_dist=False)
assert_raises(ValueError, setup_source_space, 'sample', spacing='octo',
add_dist=False)
assert_raises(ValueError, setup_source_space, 'sample', spacing='oct6e',
add_dist=False)
assert_raises(ValueError, setup_source_space, 'sample', spacing='7emm',
add_dist=False)
assert_raises(ValueError, setup_source_space, 'sample', spacing='alls',
add_dist=False)
assert_raises(IOError, setup_source_space, 'sample', spacing='oct6',
subjects_dir=subjects_dir, add_dist=False)
# ico 5 (fsaverage) - write to temp file
src = read_source_spaces(fname_ico)
temp_name = op.join(tempdir, 'temp-src.fif')
with warnings.catch_warnings(record=True): # sklearn equiv neighbors
warnings.simplefilter('always')
src_new = setup_source_space('fsaverage', temp_name, spacing='ico5',
subjects_dir=subjects_dir, add_dist=False,
overwrite=True)
_compare_source_spaces(src, src_new, mode='approx')
assert_array_equal(src[0]['vertno'], np.arange(10242))
assert_array_equal(src[1]['vertno'], np.arange(10242))
# oct-6 (sample) - auto filename + IO
src = read_source_spaces(fname)
temp_name = op.join(tempdir, 'temp-src.fif')
with warnings.catch_warnings(record=True): # sklearn equiv neighbors
warnings.simplefilter('always')
src_new = setup_source_space('sample', temp_name, spacing='oct6',
subjects_dir=subjects_dir,
overwrite=True, add_dist=False)
_compare_source_spaces(src, src_new, mode='approx')
src_new = read_source_spaces(temp_name)
_compare_source_spaces(src, src_new, mode='approx')
# all source points - no file writing
src_new = setup_source_space('sample', None, spacing='all',
subjects_dir=subjects_dir, add_dist=False)
assert_true(src_new[0]['nuse'] == len(src_new[0]['rr']))
assert_true(src_new[1]['nuse'] == len(src_new[1]['rr']))
# dense source space to hit surf['inuse'] lines of _create_surf_spacing
assert_raises(RuntimeError, setup_source_space, 'sample', None,
spacing='ico6', subjects_dir=subjects_dir, add_dist=False)
@testing.requires_testing_data
def test_read_source_spaces():
"""Test reading of source space meshes
"""
src = read_source_spaces(fname, patch_stats=True)
# 3D source space
lh_points = src[0]['rr']
lh_faces = src[0]['tris']
lh_use_faces = src[0]['use_tris']
rh_points = src[1]['rr']
rh_faces = src[1]['tris']
rh_use_faces = src[1]['use_tris']
assert_true(lh_faces.min() == 0)
assert_true(lh_faces.max() == lh_points.shape[0] - 1)
assert_true(lh_use_faces.min() >= 0)
assert_true(lh_use_faces.max() <= lh_points.shape[0] - 1)
assert_true(rh_faces.min() == 0)
assert_true(rh_faces.max() == rh_points.shape[0] - 1)
assert_true(rh_use_faces.min() >= 0)
assert_true(rh_use_faces.max() <= rh_points.shape[0] - 1)
@slow_test
@testing.requires_testing_data
def test_write_source_space():
"""Test reading and writing of source spaces
"""
tempdir = _TempDir()
src0 = read_source_spaces(fname, patch_stats=False)
write_source_spaces(op.join(tempdir, 'tmp-src.fif'), src0)
src1 = read_source_spaces(op.join(tempdir, 'tmp-src.fif'),
patch_stats=False)
_compare_source_spaces(src0, src1)
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
src_badname = op.join(tempdir, 'test-bad-name.fif.gz')
write_source_spaces(src_badname, src0)
read_source_spaces(src_badname)
assert_equal(len(w), 2)
@testing.requires_testing_data
@requires_fs_or_nibabel
def test_vertex_to_mni():
"""Test conversion of vertices to MNI coordinates
"""
# obtained using "tksurfer (sample) (l/r)h white"
vertices = [100960, 7620, 150549, 96761]
coords = np.array([[-60.86, -11.18, -3.19], [-36.46, -93.18, -2.36],
[-38.00, 50.08, -10.61], [47.14, 8.01, 46.93]])
hemis = [0, 0, 0, 1]
coords_2 = vertex_to_mni(vertices, hemis, 'sample', subjects_dir)
# less than 1mm error
assert_allclose(coords, coords_2, atol=1.0)
@testing.requires_testing_data
@requires_freesurfer
@requires_nibabel()
def test_vertex_to_mni_fs_nibabel():
"""Test equivalence of vert_to_mni for nibabel and freesurfer
"""
n_check = 1000
subject = 'sample'
vertices = np.random.randint(0, 100000, n_check)
hemis = np.random.randint(0, 1, n_check)
coords = vertex_to_mni(vertices, hemis, subject, subjects_dir,
'nibabel')
coords_2 = vertex_to_mni(vertices, hemis, subject, subjects_dir,
'freesurfer')
# less than 0.1 mm error
assert_allclose(coords, coords_2, atol=0.1)
@testing.requires_testing_data
@requires_freesurfer
@requires_nibabel()
def test_get_volume_label_names():
"""Test reading volume label names
"""
aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
label_names = get_volume_labels_from_aseg(aseg_fname)
assert_equal(label_names.count('Brain-Stem'), 1)
@testing.requires_testing_data
@requires_freesurfer
@requires_nibabel()
def test_source_space_from_label():
"""Test generating a source space from volume label
"""
tempdir = _TempDir()
aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
label_names = get_volume_labels_from_aseg(aseg_fname)
volume_label = label_names[int(np.random.rand() * len(label_names))]
# Test pos as dict
pos = dict()
assert_raises(ValueError, setup_volume_source_space, 'sample', pos=pos,
volume_label=volume_label, mri=aseg_fname)
# Test no mri provided
assert_raises(RuntimeError, setup_volume_source_space, 'sample', mri=None,
volume_label=volume_label)
# Test invalid volume label
assert_raises(ValueError, setup_volume_source_space, 'sample',
volume_label='Hello World!', mri=aseg_fname)
src = setup_volume_source_space('sample', subjects_dir=subjects_dir,
volume_label=volume_label, mri=aseg_fname,
add_interpolator=False)
assert_equal(volume_label, src[0]['seg_name'])
# test reading and writing
out_name = op.join(tempdir, 'temp-src.fif')
write_source_spaces(out_name, src)
src_from_file = read_source_spaces(out_name)
_compare_source_spaces(src, src_from_file, mode='approx')
@testing.requires_testing_data
@requires_freesurfer
@requires_nibabel()
def test_combine_source_spaces():
"""Test combining source spaces
"""
tempdir = _TempDir()
aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
label_names = get_volume_labels_from_aseg(aseg_fname)
volume_labels = [label_names[int(np.random.rand() * len(label_names))]
for ii in range(2)]
# get a surface source space (no need to test creation here)
srf = read_source_spaces(fname, patch_stats=False)
# setup 2 volume source spaces
vol = setup_volume_source_space('sample', subjects_dir=subjects_dir,
volume_label=volume_labels[0],
mri=aseg_fname, add_interpolator=False)
# setup a discrete source space
rr = np.random.randint(0, 20, (100, 3)) * 1e-3
nn = np.zeros(rr.shape)
nn[:, -1] = 1
pos = {'rr': rr, 'nn': nn}
disc = setup_volume_source_space('sample', subjects_dir=subjects_dir,
pos=pos, verbose='error')
# combine source spaces
src = srf + vol + disc
# test addition of source spaces
assert_equal(type(src), SourceSpaces)
assert_equal(len(src), 4)
# test reading and writing
src_out_name = op.join(tempdir, 'temp-src.fif')
src.save(src_out_name)
src_from_file = read_source_spaces(src_out_name)
_compare_source_spaces(src, src_from_file, mode='approx')
# test that all source spaces are in MRI coordinates
coord_frames = np.array([s['coord_frame'] for s in src])
assert_true((coord_frames == FIFF.FIFFV_COORD_MRI).all())
# test errors for export_volume
image_fname = op.join(tempdir, 'temp-image.mgz')
# source spaces with no volume
assert_raises(ValueError, srf.export_volume, image_fname, verbose='error')
# unrecognized source type
disc2 = disc.copy()
disc2[0]['type'] = 'kitty'
src_unrecognized = src + disc2
assert_raises(ValueError, src_unrecognized.export_volume, image_fname,
verbose='error')
# unrecognized file type
bad_image_fname = op.join(tempdir, 'temp-image.png')
assert_raises(ValueError, src.export_volume, bad_image_fname,
verbose='error')
# mixed coordinate frames
disc3 = disc.copy()
disc3[0]['coord_frame'] = 10
src_mixed_coord = src + disc3
assert_raises(ValueError, src_mixed_coord.export_volume, image_fname,
verbose='error')
run_tests_if_main()
# The following code was used to generate small-src.fif.gz.
# Unfortunately the C code bombs when trying to add source space distances,
# possibly due to incomplete "faking" of a smaller surface on our part here.
"""
# -*- coding: utf-8 -*-
import os
import numpy as np
import mne
data_path = mne.datasets.sample.data_path()
src = mne.setup_source_space('sample', fname=None, spacing='oct5')
hemis = ['lh', 'rh']
fnames = [data_path + '/subjects/sample/surf/%s.decimated' % h for h in hemis]
vs = list()
for s, fname in zip(src, fnames):
coords = s['rr'][s['vertno']]
vs.append(s['vertno'])
idx = -1 * np.ones(len(s['rr']))
idx[s['vertno']] = np.arange(s['nuse'])
faces = s['use_tris']
faces = idx[faces]
mne.write_surface(fname, coords, faces)
# we need to move sphere surfaces
spheres = [data_path + '/subjects/sample/surf/%s.sphere' % h for h in hemis]
for s in spheres:
os.rename(s, s + '.bak')
try:
for s, v in zip(spheres, vs):
coords, faces = mne.read_surface(s + '.bak')
coords = coords[v]
mne.write_surface(s, coords, faces)
src = mne.setup_source_space('sample', fname=None, spacing='oct4',
surface='decimated')
finally:
for s in spheres:
os.rename(s + '.bak', s)
fname = 'small-src.fif'
fname_gz = fname + '.gz'
mne.write_source_spaces(fname, src)
mne.utils.run_subprocess(['mne_add_patch_info', '--src', fname,
'--srcp', fname])
mne.write_source_spaces(fname_gz, mne.read_source_spaces(fname))
"""
| bsd-3-clause |
DuCorey/bokeh | examples/models/file/colors.py | 2 | 9054 | from __future__ import print_function
from math import pi
import pandas as pd
from bokeh.models import (
Plot, ColumnDataSource, FactorRange, CategoricalAxis, TapTool, HoverTool, OpenURL, CategoricalScale)
from bokeh.models.glyphs import Rect
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.util.browser import view
css3_colors = pd.DataFrame([
("Pink", "#FFC0CB", "Pink"),
("LightPink", "#FFB6C1", "Pink"),
("HotPink", "#FF69B4", "Pink"),
("DeepPink", "#FF1493", "Pink"),
("PaleVioletRed", "#DB7093", "Pink"),
("MediumVioletRed", "#C71585", "Pink"),
("LightSalmon", "#FFA07A", "Red"),
("Salmon", "#FA8072", "Red"),
("DarkSalmon", "#E9967A", "Red"),
("LightCoral", "#F08080", "Red"),
("IndianRed", "#CD5C5C", "Red"),
("Crimson", "#DC143C", "Red"),
("FireBrick", "#B22222", "Red"),
("DarkRed", "#8B0000", "Red"),
("Red", "#FF0000", "Red"),
("OrangeRed", "#FF4500", "Orange"),
("Tomato", "#FF6347", "Orange"),
("Coral", "#FF7F50", "Orange"),
("DarkOrange", "#FF8C00", "Orange"),
("Orange", "#FFA500", "Orange"),
("Yellow", "#FFFF00", "Yellow"),
("LightYellow", "#FFFFE0", "Yellow"),
("LemonChiffon", "#FFFACD", "Yellow"),
("LightGoldenrodYellow", "#FAFAD2", "Yellow"),
("PapayaWhip", "#FFEFD5", "Yellow"),
("Moccasin", "#FFE4B5", "Yellow"),
("PeachPuff", "#FFDAB9", "Yellow"),
("PaleGoldenrod", "#EEE8AA", "Yellow"),
("Khaki", "#F0E68C", "Yellow"),
("DarkKhaki", "#BDB76B", "Yellow"),
("Gold", "#FFD700", "Yellow"),
("Cornsilk", "#FFF8DC", "Brown"),
("BlanchedAlmond", "#FFEBCD", "Brown"),
("Bisque", "#FFE4C4", "Brown"),
("NavajoWhite", "#FFDEAD", "Brown"),
("Wheat", "#F5DEB3", "Brown"),
("BurlyWood", "#DEB887", "Brown"),
("Tan", "#D2B48C", "Brown"),
("RosyBrown", "#BC8F8F", "Brown"),
("SandyBrown", "#F4A460", "Brown"),
("Goldenrod", "#DAA520", "Brown"),
("DarkGoldenrod", "#B8860B", "Brown"),
("Peru", "#CD853F", "Brown"),
("Chocolate", "#D2691E", "Brown"),
("SaddleBrown", "#8B4513", "Brown"),
("Sienna", "#A0522D", "Brown"),
("Brown", "#A52A2A", "Brown"),
("Maroon", "#800000", "Brown"),
("DarkOliveGreen", "#556B2F", "Green"),
("Olive", "#808000", "Green"),
("OliveDrab", "#6B8E23", "Green"),
("YellowGreen", "#9ACD32", "Green"),
("LimeGreen", "#32CD32", "Green"),
("Lime", "#00FF00", "Green"),
("LawnGreen", "#7CFC00", "Green"),
("Chartreuse", "#7FFF00", "Green"),
("GreenYellow", "#ADFF2F", "Green"),
("SpringGreen", "#00FF7F", "Green"),
("MediumSpringGreen", "#00FA9A", "Green"),
("LightGreen", "#90EE90", "Green"),
("PaleGreen", "#98FB98", "Green"),
("DarkSeaGreen", "#8FBC8F", "Green"),
("MediumSeaGreen", "#3CB371", "Green"),
("SeaGreen", "#2E8B57", "Green"),
("ForestGreen", "#228B22", "Green"),
("Green", "#008000", "Green"),
("DarkGreen", "#006400", "Green"),
("MediumAquamarine", "#66CDAA", "Cyan"),
("Aqua", "#00FFFF", "Cyan"),
("Cyan", "#00FFFF", "Cyan"),
("LightCyan", "#E0FFFF", "Cyan"),
("PaleTurquoise", "#AFEEEE", "Cyan"),
("Aquamarine", "#7FFFD4", "Cyan"),
("Turquoise", "#40E0D0", "Cyan"),
("MediumTurquoise", "#48D1CC", "Cyan"),
("DarkTurquoise", "#00CED1", "Cyan"),
("LightSeaGreen", "#20B2AA", "Cyan"),
("CadetBlue", "#5F9EA0", "Cyan"),
("DarkCyan", "#008B8B", "Cyan"),
("Teal", "#008080", "Cyan"),
("LightSteelBlue", "#B0C4DE", "Blue"),
("PowderBlue", "#B0E0E6", "Blue"),
("LightBlue", "#ADD8E6", "Blue"),
("SkyBlue", "#87CEEB", "Blue"),
("LightSkyBlue", "#87CEFA", "Blue"),
("DeepSkyBlue", "#00BFFF", "Blue"),
("DodgerBlue", "#1E90FF", "Blue"),
("CornflowerBlue", "#6495ED", "Blue"),
("SteelBlue", "#4682B4", "Blue"),
("RoyalBlue", "#4169E1", "Blue"),
("Blue", "#0000FF", "Blue"),
("MediumBlue", "#0000CD", "Blue"),
("DarkBlue", "#00008B", "Blue"),
("Navy", "#000080", "Blue"),
("MidnightBlue", "#191970", "Blue"),
("Lavender", "#E6E6FA", "Purple"),
("Thistle", "#D8BFD8", "Purple"),
("Plum", "#DDA0DD", "Purple"),
("Violet", "#EE82EE", "Purple"),
("Orchid", "#DA70D6", "Purple"),
("Fuchsia", "#FF00FF", "Purple"),
("Magenta", "#FF00FF", "Purple"),
("MediumOrchid", "#BA55D3", "Purple"),
("MediumPurple", "#9370DB", "Purple"),
("BlueViolet", "#8A2BE2", "Purple"),
("DarkViolet", "#9400D3", "Purple"),
("DarkOrchid", "#9932CC", "Purple"),
("DarkMagenta", "#8B008B", "Purple"),
("Purple", "#800080", "Purple"),
("Indigo", "#4B0082", "Purple"),
("DarkSlateBlue", "#483D8B", "Purple"),
("SlateBlue", "#6A5ACD", "Purple"),
("MediumSlateBlue", "#7B68EE", "Purple"),
("White", "#FFFFFF", "White"),
("Snow", "#FFFAFA", "White"),
("Honeydew", "#F0FFF0", "White"),
("MintCream", "#F5FFFA", "White"),
("Azure", "#F0FFFF", "White"),
("AliceBlue", "#F0F8FF", "White"),
("GhostWhite", "#F8F8FF", "White"),
("WhiteSmoke", "#F5F5F5", "White"),
("Seashell", "#FFF5EE", "White"),
("Beige", "#F5F5DC", "White"),
("OldLace", "#FDF5E6", "White"),
("FloralWhite", "#FFFAF0", "White"),
("Ivory", "#FFFFF0", "White"),
("AntiqueWhite", "#FAEBD7", "White"),
("Linen", "#FAF0E6", "White"),
("LavenderBlush", "#FFF0F5", "White"),
("MistyRose", "#FFE4E1", "White"),
("Gainsboro", "#DCDCDC", "Gray/Black"),
("LightGray", "#D3D3D3", "Gray/Black"),
("Silver", "#C0C0C0", "Gray/Black"),
("DarkGray", "#A9A9A9", "Gray/Black"),
("Gray", "#808080", "Gray/Black"),
("DimGray", "#696969", "Gray/Black"),
("LightSlateGray", "#778899", "Gray/Black"),
("SlateGray", "#708090", "Gray/Black"),
("DarkSlateGray", "#2F4F4F", "Gray/Black"),
("Black", "#000000", "Gray/Black"),
], columns=["Name", "Color", "Group"])
source = ColumnDataSource(dict(
names = list(css3_colors.Name),
groups = list(css3_colors.Group),
colors = list(css3_colors.Color),
))
xdr = FactorRange(factors=list(css3_colors.Group.unique()))
ydr = FactorRange(factors=list(reversed(css3_colors.Name)))
x_scale, y_scale = CategoricalScale(), CategoricalScale()
plot = Plot(x_range=xdr, y_range=ydr, x_scale=x_scale, y_scale=y_scale, plot_width=600, plot_height=2000)
plot.title.text = "CSS3 Color Names"
rect = Rect(x="groups", y="names", width=1, height=1, fill_color="colors", line_color=None)
rect_renderer = plot.add_glyph(source, rect)
xaxis_above = CategoricalAxis(major_label_orientation=pi/4)
plot.add_layout(xaxis_above, 'above')
xaxis_below = CategoricalAxis(major_label_orientation=pi/4)
plot.add_layout(xaxis_below, 'below')
plot.add_layout(CategoricalAxis(), 'left')
url = "http://www.colors.commutercreative.com/@names/"
tooltips = """Click the color to go to:<br /><a href="{url}">{url}</a>""".format(url=url)
tap = TapTool(renderers=[rect_renderer], callback=OpenURL(url=url))
hover = HoverTool(renderers=[rect_renderer], tooltips=tooltips)
plot.tools.extend([tap, hover])
doc = Document()
doc.add_root(plot)
if __name__ == "__main__":
doc.validate()
filename = "colors.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "CSS3 Color Names"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
priscillaboyd/SPaT_Prediction | src/preprocessing/Merger.py | 1 | 2156 | # Copyright 2017 Priscilla Boyd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
The Merger module combines all phase data and I/O detection data into a single dataset CSV file.
"""
import pandas as pd
from tools.Utils import results_folder, output_fields
def data_merge(detector_fields):
"""
Combine all processed data into a single dataset file.
:param list[str] detector_fields: list of strings with detector names
:return: location of dataset
:rtype: string
"""
print("Merging final data...")
# load files that contain phase and I/O processed data and store as dfs
phase_data = pd.read_csv(results_folder + 'phases/processed/clean_merged_phases.csv', header=0,
skipinitialspace=True, usecols=output_fields)
detection_data = pd.read_csv(results_folder + 'io/io_out.csv', header=0, skipinitialspace=True,
usecols=detector_fields)
phase_df = pd.DataFrame(phase_data)
detection_df = pd.DataFrame(detection_data)
# merge the two files based on their Date and Time fields
output = pd.merge(phase_df, detection_df, on=['Date', 'Time'])
# store the output with any duplicates dropped and create a final CSV file
merged_df = output.drop_duplicates()
merged_df.to_csv(results_folder + 'dataset.csv', sep=',', index=False)
print("Data merged!")
print("Main dataset available: " + results_folder + 'dataset.csv')
# return location of dataset
return results_folder + 'dataset.csv'
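# Hedged usage sketch (detector names are illustrative; the list must include
# 'Date' and 'Time' because they are read via ``usecols`` and used as the
# merge keys above):
#   dataset_path = data_merge(['Date', 'Time', 'Detector01', 'Detector02'])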
| apache-2.0 |
wizmer/NeuroM | neurom/view/common.py | 1 | 16929 | # Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functionality for styling plots."""
from pathlib import Path
import numpy as np
from matplotlib.patches import Polygon
# needed so that projection='3d' works with fig.add_subplot
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=unused-import
from scipy.linalg import norm
from scipy.spatial import ConvexHull
plt = None # refer to _get_plt()
def _get_plt():
"""Wrapper to avoid loading matplotlib.pyplot before someone has a chance to set the backend."""
global plt # pylint: disable=global-statement
import matplotlib.pyplot # pylint: disable=import-outside-toplevel
plt = matplotlib.pyplot
def dict_if_none(arg):
"""Return an empty dict if arg is None."""
return arg if arg is not None else {}
def figure_naming(pretitle='', posttitle='', prefile='', postfile=''):
"""Returns a formatted string with the figure name and title.
Helper function to define the strings that handle pre-post conventions
for viewing - plotting title and saving options.
Args:
pretitle(str): String to include before the general title of the figure.
posttitle(str): String to include after the general title of the figure.
prefile(str): String to include before the general filename of the figure.
postfile(str): String to include after the general filename of the figure.
Returns:
        tuple(str): pretitle, posttitle, prefile and postfile, formatted for
        inclusion in the figure title and filename.
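
    Example:
        An illustrative call (the output just reflects the formatting rules
        implemented below):

        >>> figure_naming('Pre', 'Post', 'pre', 'post')
        ('Pre -- ', ' -- Post', 'pre_', '_post')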
"""
if pretitle:
pretitle = "%s -- " % pretitle
if posttitle:
posttitle = " -- %s" % posttitle
if prefile:
prefile = "%s_" % prefile
if postfile:
postfile = "_%s" % postfile
return pretitle, posttitle, prefile, postfile
def get_figure(new_fig=True, subplot='111', params=None):
"""Function to be used for viewing - plotting, to initialize the matplotlib figure - axes.
Args:
new_fig(bool): Defines if a new figure will be created, if false current figure is used
        subplot (tuple or matplotlib subplot specifier string): Create axes with these parameters
params (dict): extra options passed to add_subplot()
Returns:
Matplotlib Figure and Axes
"""
_get_plt()
if new_fig:
fig = plt.figure()
else:
fig = plt.gcf()
params = dict_if_none(params)
if isinstance(subplot, (tuple, list)):
ax = fig.add_subplot(*subplot, **params)
else:
ax = fig.add_subplot(subplot, **params)
return fig, ax
def save_plot(fig, prefile='', postfile='', output_path='./', output_name='Figure',
output_format='png', dpi=300, transparent=False, **_):
"""Generates a figure file in the selected directory.
Args:
fig: matplotlib figure
prefile(str): Include before the general filename of the figure
postfile(str): Included after the general filename of the figure
output_path(str): Define the path to the output directory
output_name(str): String to define the name of the output figure
output_format(str): String to define the format of the output figure
dpi(int): Define the DPI (Dots per Inch) of the figure
transparent(bool): If True the saved figure will have a transparent background
"""
output_path = Path(output_path)
output_path.mkdir(parents=True, exist_ok=True)
fig.savefig(Path(output_path, prefile + output_name + postfile + "." + output_format),
dpi=dpi, transparent=transparent)
def plot_style(fig, ax, # pylint: disable=too-many-arguments, too-many-locals
# plot_title
pretitle='',
title='Figure',
posttitle='',
title_fontsize=14,
title_arg=None,
# plot_labels
label_fontsize=14,
xlabel=None,
xlabel_arg=None,
ylabel=None,
ylabel_arg=None,
zlabel=None,
zlabel_arg=None,
# plot_ticks
tick_fontsize=12,
xticks=None,
xticks_args=None,
yticks=None,
yticks_args=None,
zticks=None,
zticks_args=None,
# update_plot_limits
white_space=30,
# plot_legend
no_legend=True,
legend_arg=None,
# internal
no_axes=False,
aspect_ratio='equal',
tight=False,
**_):
"""Set the basic options of a matplotlib figure, to be used by viewing - plotting functions.
Args:
fig(matplotlib figure): figure
ax(matplotlib axes, belonging to `fig`): axes
pretitle(str): String to include before the general title of the figure
posttitle (str): String to include after the general title of the figure
title (str): Set the title for the figure
title_fontsize (int): Defines the size of the title's font
        title_arg (dict): Additional arguments for matplotlib.title() call
label_fontsize(int): Size of the labels' font
xlabel(str): The xlabel for the figure
        xlabel_arg(dict): Passed into matplotlib as xlabel arguments
        ylabel(str): The ylabel for the figure
        ylabel_arg(dict): Passed into matplotlib as ylabel arguments
        zlabel(str): The zlabel for the figure
        zlabel_arg(dict): Passed into matplotlib as zlabel arguments
        tick_fontsize (int): Defines the size of the ticks' font
        xticks([list of ticks]): Defines the values of x ticks in the figure
        xticks_args(dict): Passed into matplotlib as xticks arguments
        yticks([list of ticks]): Defines the values of y ticks in the figure
        yticks_args(dict): Passed into matplotlib as yticks arguments
        zticks([list of ticks]): Defines the values of z ticks in the figure
        zticks_args(dict): Passed into matplotlib as zticks arguments
white_space(float): whitespace added to surround the tight limit of the data
        no_legend (bool): If True, no legend is added to the figure
        legend_arg (dict): Additional arguments for matplotlib.legend() call
no_axes(bool): If True the labels and the frame will be set off
aspect_ratio(str): Sets aspect ratio of the figure, according to matplotlib aspect_ratio
tight(bool): If True the tight layout of matplotlib will be activated
Returns:
Matplotlib figure, matplotlib axes
"""
plot_title(ax, pretitle, title, posttitle, title_fontsize, title_arg)
plot_labels(ax, label_fontsize, xlabel, xlabel_arg, ylabel, ylabel_arg, zlabel, zlabel_arg)
plot_ticks(ax, tick_fontsize, xticks, xticks_args, yticks, yticks_args, zticks, zticks_args)
update_plot_limits(ax, white_space)
plot_legend(ax, no_legend, legend_arg)
if no_axes:
ax.set_frame_on(False)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if ax.name != '3d':
ax.set_aspect(aspect_ratio)
if tight:
fig.set_tight_layout(True)
def plot_title(ax, pretitle='', title='Figure', posttitle='', title_fontsize=14, title_arg=None):
"""Set title options of a matplotlib plot.
Args:
ax: matplotlib axes
pretitle(str): String to include before the general title of the figure
posttitle (str): String to include after the general title of the figure
title (str): Set the title for the figure
title_fontsize (int): Defines the size of the title's font
        title_arg (dict): Additional arguments for matplotlib.title() call
"""
current_title = ax.get_title()
if not current_title:
current_title = pretitle + title + posttitle
title_arg = dict_if_none(title_arg)
ax.set_title(current_title, fontsize=title_fontsize, **title_arg)
def plot_labels(ax, label_fontsize=14,
xlabel=None, xlabel_arg=None,
ylabel=None, ylabel_arg=None,
zlabel=None, zlabel_arg=None):
"""Sets the labels options of a matplotlib plot.
Args:
ax: matplotlib axes
label_fontsize(int): Size of the labels' font
xlabel(str): The xlabel for the figure
        xlabel_arg(dict): Passed into matplotlib as xlabel arguments
        ylabel(str): The ylabel for the figure
        ylabel_arg(dict): Passed into matplotlib as ylabel arguments
        zlabel(str): The zlabel for the figure
        zlabel_arg(dict): Passed into matplotlib as zlabel arguments
"""
xlabel = xlabel if xlabel is not None else ax.get_xlabel() or 'X'
ylabel = ylabel if ylabel is not None else ax.get_ylabel() or 'Y'
xlabel_arg = dict_if_none(xlabel_arg)
ylabel_arg = dict_if_none(ylabel_arg)
ax.set_xlabel(xlabel, fontsize=label_fontsize, **xlabel_arg)
ax.set_ylabel(ylabel, fontsize=label_fontsize, **ylabel_arg)
if hasattr(ax, 'zaxis'):
zlabel = zlabel if zlabel is not None else ax.get_zlabel() or 'Z'
zlabel_arg = dict_if_none(zlabel_arg)
ax.set_zlabel(zlabel, fontsize=label_fontsize, **zlabel_arg)
def plot_ticks(ax, tick_fontsize=12,
xticks=None, xticks_args=None,
yticks=None, yticks_args=None,
zticks=None, zticks_args=None):
"""Function that defines the labels options of a matplotlib plot.
Args:
ax: matplotlib axes
tick_fontsize (int): Defines the size of the ticks' font
xticks([list of ticks]): Defines the values of x ticks in the figure
        xticks_args(dict): Passed into matplotlib as xticks arguments
        yticks([list of ticks]): Defines the values of y ticks in the figure
        yticks_args(dict): Passed into matplotlib as yticks arguments
        zticks([list of ticks]): Defines the values of z ticks in the figure
        zticks_args(dict): Passed into matplotlib as zticks arguments
"""
if xticks is not None:
ax.set_xticks(xticks)
xticks_args = dict_if_none(xticks_args)
ax.xaxis.set_tick_params(labelsize=tick_fontsize, **xticks_args)
if yticks is not None:
ax.set_yticks(yticks)
yticks_args = dict_if_none(yticks_args)
ax.yaxis.set_tick_params(labelsize=tick_fontsize, **yticks_args)
if zticks is not None:
ax.set_zticks(zticks)
zticks_args = dict_if_none(zticks_args)
ax.zaxis.set_tick_params(labelsize=tick_fontsize, **zticks_args)
def update_plot_limits(ax, white_space):
"""Sets the limit options of a matplotlib plot.
Args:
ax: matplotlib axes
white_space(float): whitespace added to surround the tight limit of the data
Note: This relies on ax.dataLim (in 2d) and ax.[xy, zz]_dataLim being set in 3d
"""
if hasattr(ax, 'zz_dataLim'):
bounds = ax.xy_dataLim.bounds
ax.set_xlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)
ax.set_ylim(bounds[1] - white_space, bounds[1] + bounds[3] + white_space)
bounds = ax.zz_dataLim.bounds
ax.set_zlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)
else:
bounds = ax.dataLim.bounds
assert not any(map(np.isinf, bounds)), 'Cannot set bounds if dataLim has infinite elements'
ax.set_xlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)
ax.set_ylim(bounds[1] - white_space, bounds[1] + bounds[3] + white_space)
def plot_legend(ax, no_legend=True, legend_arg=None):
"""Function that defines the legend options of a matplotlib plot.
Args:
ax: matplotlib axes
        no_legend (bool): If True, no legend is added to the figure
        legend_arg (dict): Additional arguments for matplotlib.legend() call
"""
legend_arg = dict_if_none(legend_arg)
if not no_legend:
ax.legend(**legend_arg)
_LINSPACE_COUNT = 300
def _get_normals(v):
"""Get two vectors that form a basis w/ v.
Note: returned vectors are unit
"""
not_v = np.array([1, 0, 0])
if np.all(np.abs(v) == not_v):
not_v = np.array([0, 1, 0])
n1 = np.cross(v, not_v)
n1 /= norm(n1)
n2 = np.cross(v, n1)
return n1, n2
def generate_cylindrical_points(start, end, start_radius, end_radius,
linspace_count=_LINSPACE_COUNT):
"""Generate a 3d mesh of a cylinder with start and end points, and varying radius.
Based on: http://stackoverflow.com/a/32383775
"""
v = end - start
length = norm(v)
v = v / length
n1, n2 = _get_normals(v)
# pylint: disable=unbalanced-tuple-unpacking
l, theta = np.meshgrid(np.linspace(0, length, linspace_count),
np.linspace(0, 2 * np.pi, linspace_count))
radii = np.linspace(start_radius, end_radius, linspace_count)
rsin = np.multiply(radii, np.sin(theta))
rcos = np.multiply(radii, np.cos(theta))
return np.array([start[i] +
v[i] * l +
n1[i] * rsin + n2[i] * rcos
for i in range(3)])
def project_cylinder_onto_2d(ax, plane,
start, end, start_radius, end_radius,
color='black', alpha=1.):
"""Take cylinder defined by start/end, and project it onto the plane.
Args:
ax: matplotlib axes
plane(tuple of int): where x, y, z = 0, 1, 2, so (0, 1) is the xy axis
start(np.array): start coordinates
end(np.array): end coordinates
start_radius(float): start radius
end_radius(float): end radius
color: matplotlib color
alpha(float): alpha value
Note: There are probably more efficient ways of doing this: here the
3d outline is calculated, the non-used plane coordinates are dropped, a
tight convex hull is found, and that is used for a filled polygon
"""
points = generate_cylindrical_points(start, end, start_radius, end_radius, 10)
points = np.vstack([points[plane[0]].ravel(),
points[plane[1]].ravel()])
points = points.T
hull = ConvexHull(points)
ax.add_patch(Polygon(points[hull.vertices], fill=True, color=color, alpha=alpha))
def plot_cylinder(ax, start, end, start_radius, end_radius,
color='black', alpha=1., linspace_count=_LINSPACE_COUNT):
"""Plot a 3d cylinder."""
assert not np.all(start == end), 'Cylinder must have length'
x, y, z = generate_cylindrical_points(start, end, start_radius, end_radius,
linspace_count=linspace_count)
ax.plot_surface(x, y, z, color=color, alpha=alpha)
def plot_sphere(ax, center, radius, color='black', alpha=1., linspace_count=_LINSPACE_COUNT):
"""Plots a 3d sphere, given the center and the radius."""
u = np.linspace(0, 2 * np.pi, linspace_count)
v = np.linspace(0, np.pi, linspace_count)
sin_v = np.sin(v)
x = center[0] + radius * np.outer(np.cos(u), sin_v)
y = center[1] + radius * np.outer(np.sin(u), sin_v)
z = center[2] + radius * np.outer(np.ones_like(u), np.cos(v))
ax.plot_surface(x, y, z, linewidth=0.0, color=color, alpha=alpha)
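# Minimal usage sketch for the 3d helpers above (parameter values are
# illustrative; a 3d projection is available because Axes3D is imported at the
# top of this module):
#   fig, ax = get_figure(subplot=(1, 1, 1), params={'projection': '3d'})
#   plot_sphere(ax, center=(0., 0., 0.), radius=1., color='red', alpha=0.5)
#   plot_cylinder(ax, start=np.zeros(3), end=np.array([0., 0., 2.]),
#                 start_radius=0.5, end_radius=0.1)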
| bsd-3-clause |
pkathail/magic | python/setup.py | 1 | 2225 | import os
import sys
from setuptools import setup
install_requires = [
"numpy>=1.14.0",
"scipy>=1.1.0",
"matplotlib",
"scikit-learn>=0.19.1",
"future",
"tasklogger>=1.0.0",
"graphtools>=1.4.0",
"pandas>=0.25",
"scprep>=1.0",
]
test_requires = [
"nose2",
]
if sys.version_info[0] == 3:
test_requires += ["anndata"]
doc_requires = [
"sphinx",
"sphinxcontrib-napoleon",
]
if sys.version_info[:2] < (3, 5):
raise RuntimeError("Python version >=3.5 required.")
elif sys.version_info[:2] >= (3, 6):
test_requires += ["black"]
version_py = os.path.join(os.path.dirname(__file__), "magic", "version.py")
version = open(version_py).read().strip().split("=")[-1].replace('"', "").strip()
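# Note: the parsing above assumes magic/version.py holds a single assignment
# of the form (value below is illustrative only):
#     __version__ = "X.Y.Z"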
readme = open("README.rst").read()
setup(
name="magic-impute",
version=version,
description="MAGIC",
author="",
author_email="",
packages=["magic",],
license="GNU General Public License Version 2",
install_requires=install_requires,
extras_require={"test": test_requires, "doc": doc_requires},
test_suite="nose2.collector.collector",
long_description=readme,
url="https://github.com/KrishnaswamyLab/MAGIC",
download_url="https://github.com/KrishnaswamyLab/MAGIC/archive/v{}.tar.gz".format(
version
),
keywords=[
"visualization",
"big-data",
"dimensionality-reduction",
"embedding",
"manifold-learning",
"computational-biology",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Framework :: Jupyter",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
)
| gpl-2.0 |
mugizico/scikit-learn | sklearn/neighbors/base.py | 115 | 29783 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e.
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: when the number of duplicates is larger
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
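# Illustrative sketch (not part of scikit-learn): the CSR layout built in
# kneighbors_graph above, mimicked for 3 query points with 2 neighbors each.
def _example_knn_graph_layout():
    n_samples1, n_neighbors = 3, 2
    A_indptr = np.arange(0, n_samples1 * n_neighbors + 1, n_neighbors)  # [0 2 4 6]
    A_ind = np.array([[1, 2], [0, 2], [0, 1]])  # neighbor indices per query row
    A_data = np.ones(n_samples1 * n_neighbors)  # 'connectivity' mode weights
    return csr_matrix((A_data, A_ind.ravel(), A_indptr), shape=(3, 3))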
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the points from the population
matrix that lie within a ball of size ``radius`` around the query
points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([1., 1., 1.])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e.
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to points at a distance lower than
``radius``.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
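# Illustrative sketch (not part of scikit-learn): the indptr built above with
# np.cumsum supports rows with different neighbor counts, e.g. 2, 0 and 3
# neighbors per query give indptr == [0, 2, 2, 5].
def _example_radius_graph_indptr():
    n_neighbors = np.array([2, 0, 3])
    return np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors)))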
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
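# Illustrative sketch (not part of scikit-learn): the per-output label encoding
# used above via np.unique(..., return_inverse=True).
def _example_label_encoding():
    y = np.array(['b', 'a', 'b', 'c'])
    classes, encoded = np.unique(y, return_inverse=True)
    # classes == array(['a', 'b', 'c']); encoded == array([1, 0, 1, 2])
    return classes, encoded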
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| bsd-3-clause |
raghavrv/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
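# Illustrative usage sketch (not part of the original module): fit an
# EmpiricalCovariance estimator on made-up data.
def _example_empirical_covariance():
    import numpy as np
    X = np.random.RandomState(0).randn(100, 3)
    estimator = EmpiricalCovariance().fit(X)
    return estimator.covariance_  # the (3, 3) estimated covariance matrix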
| bsd-3-clause |
r-mart/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 56 | 37976 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
err_msg='pred_%d does not match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise; otherwise the warning gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one line per iteration for 1-10, then one line every 10 iterations for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
procoder317/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
    # Make sure that the first element of Yt is ~1; this means
    # the reconstruction worked as expected.
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that set_params followed by partial_fit validates the new n_components.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of input features raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
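# Illustrative sketch (not part of the original tests): the loop above feeds
# contiguous row slices to partial_fit. A minimal standalone version of that
# batching pattern, written against hypothetical data X_demo:
def _fit_incremental_in_batches(X_demo, n_components=2, batch_size=10):
    ipca_demo = IncrementalPCA(n_components=n_components)
    for start in range(0, X_demo.shape[0], batch_size):
        # Each call updates the running mean and components in place.
        ipca_demo.partial_fit(X_demo[start:start + batch_size])
    return ipca_demo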
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
0x0all/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/blocking_input.py | 69 | 12119 | """
This provides several classes used for blocking interaction with figure windows:
:class:`BlockingInput`
creates a callable object to retrieve events in a blocking way for interactive sessions
:class:`BlockingKeyMouseInput`
creates a callable object to retrieve key or mouse clicks in a blocking way for interactive sessions.
Note: Subclass of BlockingInput. Used by waitforbuttonpress
:class:`BlockingMouseInput`
creates a callable object to retrieve mouse clicks in a blocking way for interactive sessions.
Note: Subclass of BlockingInput. Used by ginput
:class:`BlockingContourLabeler`
creates a callable object to retrieve mouse clicks in a blocking way that will then be used to place labels on a ContourSet
Note: Subclass of BlockingMouseInput. Used by clabel
"""
import time
import numpy as np
from matplotlib import path, verbose
from matplotlib.cbook import is_sequence_of_strings
class BlockingInput(object):
"""
Class that creates a callable object to retrieve events in a
blocking way.
"""
def __init__(self, fig, eventslist=()):
self.fig = fig
assert is_sequence_of_strings(eventslist), "Requires a sequence of event name strings"
self.eventslist = eventslist
def on_event(self, event):
"""
Event handler that will be passed to the current figure to
retrieve events.
"""
# Add a new event to list - using a separate function is
# overkill for the base class, but this is consistent with
# subclasses
self.add_event(event)
verbose.report("Event %i" % len(self.events))
# This will extract info from events
self.post_event()
# Check if we have enough events already
if len(self.events) >= self.n and self.n > 0:
self.fig.canvas.stop_event_loop()
def post_event(self):
"""For baseclass, do nothing but collect events"""
pass
def cleanup(self):
"""Disconnect all callbacks"""
for cb in self.callbacks:
self.fig.canvas.mpl_disconnect(cb)
self.callbacks=[]
def add_event(self,event):
"""For base class, this just appends an event to events."""
self.events.append(event)
def pop_event(self,index=-1):
"""
This removes an event from the event list. Defaults to
removing last event, but an index can be supplied. Note that
this does not check that there are events, much like the
        normal pop method. If no events exist, this will throw an
exception.
"""
self.events.pop(index)
def pop(self,index=-1):
self.pop_event(index)
pop.__doc__=pop_event.__doc__
def __call__(self, n=1, timeout=30 ):
"""
Blocking call to retrieve n events
"""
assert isinstance(n, int), "Requires an integer argument"
self.n = n
self.events = []
self.callbacks = []
# Ensure that the figure is shown
self.fig.show()
# connect the events to the on_event function call
for n in self.eventslist:
self.callbacks.append( self.fig.canvas.mpl_connect(n, self.on_event) )
try:
# Start event loop
self.fig.canvas.start_event_loop(timeout=timeout)
finally: # Run even on exception like ctrl-c
# Disconnect the callbacks
self.cleanup()
# Return the events in this case
return self.events
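# Illustrative sketch (not part of the original module): BlockingInput is used
# by constructing it with a figure and the event names of interest, then
# calling the instance to block until enough events arrive or the timeout
# expires. The helper below is hypothetical and only documents that calling
# convention; `fig` is assumed to be an existing matplotlib Figure.
def _example_collect_key_events(fig, n=1, timeout=10):
    collector = BlockingInput(fig, eventslist=('key_press_event',))
    # Spins the canvas event loop and returns the collected event objects.
    return collector(n=n, timeout=timeout)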
class BlockingMouseInput(BlockingInput):
"""
Class that creates a callable object to retrieve mouse clicks in a
blocking way.
This class will also retrieve keyboard clicks and treat them like
appropriate mouse clicks (delete and backspace are like mouse button 3,
enter is like mouse button 2 and all others are like mouse button 1).
"""
def __init__(self, fig):
BlockingInput.__init__(self, fig=fig,
eventslist=('button_press_event',
'key_press_event') )
def post_event(self):
"""
This will be called to process events
"""
assert len(self.events)>0, "No events yet"
if self.events[-1].name == 'key_press_event':
self.key_event()
else:
self.mouse_event()
def mouse_event(self):
'''Process a mouse click event'''
event = self.events[-1]
button = event.button
if button == 3:
self.button3(event)
elif button == 2:
self.button2(event)
else:
self.button1(event)
def key_event(self):
'''
Process a key click event. This maps certain keys to appropriate
mouse click events.
'''
event = self.events[-1]
key = event.key
if key == 'backspace' or key == 'delete':
self.button3(event)
elif key == 'enter':
self.button2(event)
else:
self.button1(event)
def button1( self, event ):
"""
Will be called for any event involving a button other than
button 2 or 3. This will add a click if it is inside axes.
"""
if event.inaxes:
self.add_click(event)
else: # If not a valid click, remove from event list
BlockingInput.pop(self)
def button2( self, event ):
"""
Will be called for any event involving button 2.
Button 2 ends blocking input.
"""
# Remove last event just for cleanliness
BlockingInput.pop(self)
# This will exit even if not in infinite mode. This is
# consistent with matlab and sometimes quite useful, but will
# require the user to test how many points were actually
# returned before using data.
self.fig.canvas.stop_event_loop()
def button3( self, event ):
"""
Will be called for any event involving button 3.
Button 3 removes the last click.
"""
# Remove this last event
BlockingInput.pop(self)
# Now remove any existing clicks if possible
if len(self.events)>0:
self.pop()
def add_click(self,event):
"""
        This adds the coordinates of an event to the list of clicks.
"""
self.clicks.append((event.xdata,event.ydata))
verbose.report("input %i: %f,%f" %
(len(self.clicks),event.xdata, event.ydata))
        # If desired, plot the click
if self.show_clicks:
self.marks.extend(
event.inaxes.plot([event.xdata,], [event.ydata,], 'r+') )
self.fig.canvas.draw()
def pop_click(self,index=-1):
"""
This removes a click from the list of clicks. Defaults to
removing the last click.
"""
self.clicks.pop(index)
if self.show_clicks:
mark = self.marks.pop(index)
mark.remove()
self.fig.canvas.draw()
def pop(self,index=-1):
"""
This removes a click and the associated event from the object.
Defaults to removing the last click, but any index can be
supplied.
"""
self.pop_click(index)
BlockingInput.pop(self,index)
def cleanup(self):
# clean the figure
if self.show_clicks:
for mark in self.marks:
mark.remove()
self.marks = []
self.fig.canvas.draw()
# Call base class to remove callbacks
BlockingInput.cleanup(self)
def __call__(self, n=1, timeout=30, show_clicks=True):
"""
Blocking call to retrieve n coordinate pairs through mouse
clicks.
"""
self.show_clicks = show_clicks
self.clicks = []
self.marks = []
BlockingInput.__call__(self,n=n,timeout=timeout)
return self.clicks
class BlockingContourLabeler( BlockingMouseInput ):
"""
Class that creates a callable object that uses mouse clicks or key
clicks on a figure window to place contour labels.
"""
def __init__(self,cs):
self.cs = cs
BlockingMouseInput.__init__(self, fig=cs.ax.figure )
def button1(self,event):
"""
This will be called if an event involving a button other than
        2 or 3 occurs. This will add a label to a contour.
"""
# Shorthand
cs = self.cs
if event.inaxes == cs.ax:
conmin,segmin,imin,xmin,ymin = cs.find_nearest_contour(
event.x, event.y, cs.labelIndiceList)[:5]
# Get index of nearest level in subset of levels used for labeling
lmin = cs.labelIndiceList.index(conmin)
# Coordinates of contour
paths = cs.collections[conmin].get_paths()
lc = paths[segmin].vertices
# In pixel/screen space
slc = cs.ax.transData.transform(lc)
# Get label width for rotating labels and breaking contours
lw = cs.get_label_width(cs.labelLevelList[lmin],
cs.labelFmt, cs.labelFontSizeList[lmin])
"""
# requires python 2.5
# Figure out label rotation.
rotation,nlc = cs.calc_label_rot_and_inline(
slc, imin, lw, lc if self.inline else [],
self.inline_spacing )
"""
# Figure out label rotation.
if self.inline: lcarg = lc
else: lcarg = None
rotation,nlc = cs.calc_label_rot_and_inline(
slc, imin, lw, lcarg,
self.inline_spacing )
cs.add_label(xmin,ymin,rotation,cs.labelLevelList[lmin],
cs.labelCValueList[lmin])
if self.inline:
# Remove old, not looping over paths so we can do this up front
paths.pop(segmin)
# Add paths if not empty or single point
for n in nlc:
if len(n)>1:
paths.append( path.Path(n) )
self.fig.canvas.draw()
else: # Remove event if not valid
BlockingInput.pop(self)
def button3(self,event):
"""
This will be called if button 3 is clicked. This will remove
a label if not in inline mode. Unfortunately, if one is doing
inline labels, then there is currently no way to fix the
broken contour - once humpty-dumpty is broken, he can't be put
back together. In inline mode, this does nothing.
"""
# Remove this last event - not too important for clabel use
# since clabel normally doesn't have a maximum number of
# events, but best for cleanliness sake.
BlockingInput.pop(self)
if self.inline:
pass
else:
self.cs.pop_label()
self.cs.ax.figure.canvas.draw()
def __call__(self,inline,inline_spacing=5,n=-1,timeout=-1):
self.inline=inline
self.inline_spacing=inline_spacing
BlockingMouseInput.__call__(self,n=n,timeout=timeout,
show_clicks=False)
class BlockingKeyMouseInput(BlockingInput):
"""
Class that creates a callable object to retrieve a single mouse or
keyboard click
"""
def __init__(self, fig):
BlockingInput.__init__(self, fig=fig, eventslist=('button_press_event','key_press_event') )
def post_event(self):
"""
Determines if it is a key event
"""
assert len(self.events)>0, "No events yet"
self.keyormouse = self.events[-1].name == 'key_press_event'
def __call__(self, timeout=30):
"""
Blocking call to retrieve a single mouse or key click
Returns True if key click, False if mouse, or None if timeout
"""
self.keyormouse = None
BlockingInput.__call__(self,n=1,timeout=timeout)
return self.keyormouse
| gpl-3.0 |
alubbock/pysb | pysb/examples/paper_figures/fig6.py | 5 | 9204 | """Fit the EARM (Lopez embedded) model to experimental data for Figure 6 of the PySB publication."""
from __future__ import print_function
import pysb.integrate
import pysb.util
import numpy as np
import scipy.optimize
import scipy.interpolate
import matplotlib.pyplot as plt
import os
import sys
import inspect
from earm.lopez_embedded import model
# List of model observables and corresponding data file columns for
# point-by-point fitting
obs_names = ['mBid', 'cPARP']
data_names = ['norm_ICRP', 'norm_ECRP']
var_names = ['nrm_var_ICRP', 'nrm_var_ECRP']
# Load experimental data file
data_path = os.path.join(os.path.dirname(__file__), 'fig6_data.csv')
exp_data = np.genfromtxt(data_path, delimiter=',', names=True)
# Model observable corresponding to the IMS-RP reporter (MOMP timing)
momp_obs = 'aSmac'
# Mean and variance of Td (delay time) and Ts (switching time) of MOMP, and
# yfinal (the last value of the IMS-RP trajectory)
momp_data = np.array([9810.0, 180.0, 1.0])
momp_var = np.array([7245000.0, 3600.0, 1e-9])
# Build time points for the integrator, using the same time scale as the
# experimental data but with greater resolution to help the integrator converge.
ntimes = len(exp_data['Time'])
# Factor by which to increase time resolution
tmul = 10
# Do the sampling such that the original experimental timepoints can be
# extracted with a slice expression instead of requiring interpolation.
tspan = np.linspace(exp_data['Time'][0], exp_data['Time'][-1],
(ntimes-1) * tmul + 1)
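# Clarifying note (added, not in the original script): because tspan spans the
# same interval with (ntimes - 1) * tmul + 1 evenly spaced points, slicing with
# a step of tmul yields exactly ntimes samples, which is what the ysim[::tmul]
# slices in objective_func rely on.
assert len(tspan[::tmul]) == ntimes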
# Initialize solver object
solver = pysb.integrate.Solver(model, tspan, rtol=1e-5, atol=1e-5)
# Get parameters for rates only
rate_params = model.parameters_rules()
# Build a boolean mask for those params against the entire param list
rate_mask = np.array([p in rate_params for p in model.parameters])
# Build vector of nominal parameter values from the model
nominal_values = np.array([p.value for p in model.parameters])
# Set the radius of a hypercube bounding the search space
bounds_radius = 2
def objective_func(x, rate_mask, lb, ub):
caller_frame, _, _, caller_func, _, _ = inspect.stack()[1]
if caller_func in {'anneal', '_minimize_anneal'}:
caller_locals = caller_frame.f_locals
if caller_locals['n'] == 1:
print(caller_locals['best_state'].cost, caller_locals['current_state'].cost)
# Apply hard bounds
if np.any((x < lb) | (x > ub)):
print("bounds-check failed")
return np.inf
# Simulate model with rates taken from x (which is log transformed)
param_values = np.array([p.value for p in model.parameters])
param_values[rate_mask] = 10 ** x
solver.run(param_values)
# Calculate error for point-by-point trajectory comparisons
e1 = 0
for obs_name, data_name, var_name in zip(obs_names, data_names, var_names):
# Get model observable trajectory (this is the slice expression
# mentioned above in the comment for tspan)
ysim = solver.yobs[obs_name][::tmul]
# Normalize it to 0-1
ysim_norm = ysim / np.nanmax(ysim)
# Get experimental measurement and variance
ydata = exp_data[data_name]
yvar = exp_data[var_name]
# Compute error between simulation and experiment (chi-squared)
e1 += np.sum((ydata - ysim_norm) ** 2 / (2 * yvar)) / len(ydata)
# Calculate error for Td, Ts, and final value for IMS-RP reporter
# =====
# Normalize trajectory
ysim_momp = solver.yobs[momp_obs]
ysim_momp_norm = ysim_momp / np.nanmax(ysim_momp)
# Build a spline to interpolate it
st, sc, sk = scipy.interpolate.splrep(solver.tspan, ysim_momp_norm)
# Use root-finding to find the point where trajectory reaches 10% and 90%
t10 = scipy.interpolate.sproot((st, sc-0.10, sk))[0]
t90 = scipy.interpolate.sproot((st, sc-0.90, sk))[0]
# Calculate Td as the mean of these times
td = (t10 + t90) / 2
# Calculate Ts as their difference
ts = t90 - t10
# Get yfinal, the last element from the trajectory
yfinal = ysim_momp_norm[-1]
# Build a vector of the 3 variables to fit
momp_sim = [td, ts, yfinal]
# Perform chi-squared calculation against mean and variance vectors
e2 = np.sum((momp_data - momp_sim) ** 2 / (2 * momp_var)) / 3
# Calculate error for final cPARP value (ensure all PARP is cleaved)
cparp_final = model.parameters['PARP_0'].value
cparp_final_var = .01
cparp_final_sim = solver.yobs['cPARP'][-1]
e3 = (cparp_final - cparp_final_sim) ** 2 / (2 * cparp_final_var)
error = e1 + e2 + e3
return error
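# Illustrative sketch (added for clarity, not part of the original script): the
# Td/Ts block above locates the 10% and 90% crossing times of a normalized
# trajectory with a spline root search. A minimal standalone version of that
# calculation; it assumes y_norm actually crosses both 0.1 and 0.9.
def _switching_times(t, y_norm):
    st, sc, sk = scipy.interpolate.splrep(t, y_norm)
    t10 = scipy.interpolate.sproot((st, sc - 0.10, sk))[0]
    t90 = scipy.interpolate.sproot((st, sc - 0.90, sk))[0]
    # Td is the midpoint of the two crossings, Ts their separation.
    return (t10 + t90) / 2, t90 - t10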
def estimate(start_values=None):
"""Estimate parameter values by fitting to data.
Parameters
==========
    start_values : numpy array of floats, optional
Starting parameter values. Taken from model's nominal parameter values
if not specified.
Returns
=======
numpy array of floats, containing fitted parameter values.
"""
# Set starting position to nominal parameter values if not specified
if start_values is None:
start_values = nominal_values
else:
assert start_values.shape == nominal_values.shape
# Log-transform the starting position
x0 = np.log10(start_values[rate_mask])
# Displacement size for annealing moves
dx = .02
# The default 'fast' annealing schedule uses the 'lower' and 'upper'
# arguments in a somewhat counterintuitive way. See
# http://projects.scipy.org/scipy/ticket/1126 for more information. This is
# how to get the search to start at x0 and use a displacement on the order
# of dx (note that this will affect the T0 estimation which *does* expect
# lower and upper to be the absolute expected bounds on x).
lower = x0 - dx / 2
upper = x0 + dx / 2
# Log-transform the rate parameter values
xnominal = np.log10(nominal_values[rate_mask])
# Hard lower and upper bounds on x
lb = xnominal - bounds_radius
ub = xnominal + bounds_radius
# Perform the annealing
args = [rate_mask, lb, ub]
(xmin, Jmin, Tfinal, feval, iters, accept, retval) = \
scipy.optimize.anneal(objective_func, x0, full_output=True,
maxiter=4000, quench=0.5,
lower=lower, upper=upper,
args=args)
# Construct vector with resulting parameter values (un-log-transformed)
params_estimated = start_values.copy()
params_estimated[rate_mask] = 10 ** xmin
# Display annealing results
for v in ('xmin', 'Jmin', 'Tfinal', 'feval', 'iters', 'accept', 'retval'):
print("%s: %s" % (v, locals()[v]))
return params_estimated
def display(params_estimated):
# Simulate model with nominal parameters and construct a matrix of the
# trajectories of the observables of interest, normalized to 0-1.
solver.run()
obs_names_disp = ['mBid', 'aSmac', 'cPARP']
obs_totals = [model.parameters[n].value for n in ('Bid_0', 'Smac_0', 'PARP_0')]
sim_obs = solver.yobs[obs_names_disp].view(float).reshape(len(solver.yobs), -1)
sim_obs_norm = (sim_obs / obs_totals).T
# Do the same with the estimated parameters
solver.run(params_estimated)
sim_est_obs = solver.yobs[obs_names_disp].view(float).reshape(len(solver.yobs), -1)
sim_est_obs_norm = (sim_est_obs / obs_totals).T
# Plot data with simulation trajectories both before and after fitting
color_data = '#C0C0C0'
color_orig = '#FAAA6A'
color_est = '#83C98E'
plt.subplot(311)
plt.errorbar(exp_data['Time'], exp_data['norm_ICRP'],
yerr=exp_data['nrm_var_ICRP']**0.5, c=color_data, linewidth=2,
elinewidth=0.5)
plt.plot(solver.tspan, sim_obs_norm[0], color_orig, linewidth=2)
plt.plot(solver.tspan, sim_est_obs_norm[0], color_est, linewidth=2)
plt.ylabel('Fraction of\ncleaved IC-RP/Bid', multialignment='center')
plt.axis([0, 20000, -0.2, 1.2])
plt.subplot(312)
plt.vlines(momp_data[0], -0.2, 1.2, color=color_data, linewidth=2)
plt.plot(solver.tspan, sim_obs_norm[1], color_orig, linewidth=2)
plt.plot(solver.tspan, sim_est_obs_norm[1], color_est, linewidth=2)
plt.ylabel('Td / Fraction of\nreleased Smac', multialignment='center')
plt.axis([0, 20000, -0.2, 1.2])
plt.subplot(313)
plt.errorbar(exp_data['Time'], exp_data['norm_ECRP'],
yerr=exp_data['nrm_var_ECRP']**0.5, c=color_data, linewidth=2,
elinewidth=0.5)
plt.plot(solver.tspan, sim_obs_norm[2], color_orig, linewidth=2)
plt.plot(solver.tspan, sim_est_obs_norm[2], color_est, linewidth=2)
plt.ylabel('Fraction of\ncleaved EC-RP/PARP', multialignment='center')
plt.xlabel('Time (s)')
plt.axis([0, 20000, -0.2, 1.2])
plt.show()
if __name__ == '__main__':
params_estimated = None
try:
earm_path = sys.modules['earm'].__path__[0]
fit_file = os.path.join(earm_path, '..', 'EARM_2_0_M1a_fitted_params.txt')
params_estimated = np.genfromtxt(fit_file)[:,1].copy()
except IOError:
pass
if params_estimated is None:
np.random.seed(1)
params_estimated = estimate()
display(params_estimated)
| bsd-2-clause |
antoinecarme/pyaf | tests/bugs/issue_46/issue_46_one_or_two_rows.py | 2 | 2528 | import numpy as np
import pandas as pd
import pyaf.ForecastEngine as autof
# the goal of these tests is to make pyaf as robust as possible against very small/bad datasets
# pyaf should automatically produce reasonable/naive/trivial models in these cases.
# it should not fail in any case (normal behavior expected)
def test_fake_model_1_row(iHorizon_train , iHorizon_apply):
# one row dataset => always constant forecast
df = pd.DataFrame([[0 , 0.54543]], columns = ['date' , 'signal'])
lEngine = autof.cForecastEngine()
lEngine.train(df , 'date' , 'signal', iHorizon_train);
# print(lEngine.mSignalDecomposition.mBestModel.mTimeInfo.info())
print(lEngine.mSignalDecomposition.mBestModel.getFormula())
print("PERFS_MAPE_MASE", lEngine.mSignalDecomposition.mBestModel.mForecastPerf.mMAPE,
lEngine.mSignalDecomposition.mBestModel.mForecastPerf.mMASE, )
# print(df.head())
df1 = lEngine.forecast(df , iHorizon_apply)
# print(df1.columns)
Forecast_DF = df1[['date' , 'signal', 'signal' + '_Forecast', 'signal_Residue', 'signal_Forecast_Lower_Bound',
'signal_Forecast_Upper_Bound']]
# print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(iHorizon_apply));
def test_fake_model_2_rows(iHorizon_train , iHorizon_apply):
    # two-row dataset => expect a simple, near-trivial forecast
df = pd.DataFrame([[0 , 0.54543] , [1 , 0.43]], columns = ['date' , 'signal'])
lEngine = autof.cForecastEngine()
lEngine.train(df , 'date' , 'signal', iHorizon_train);
# print(lEngine.mSignalDecomposition.mBestModel.mTimeInfo.info())
print(lEngine.mSignalDecomposition.mBestModel.getFormula())
print("PERFS_MAPE_MASE", lEngine.mSignalDecomposition.mBestModel.mForecastPerf.mMAPE,
lEngine.mSignalDecomposition.mBestModel.mForecastPerf.mMASE, )
# print(df.head())
df1 = lEngine.forecast(df , iHorizon_apply)
# print(df1.columns)
Forecast_DF = df1[['date' , 'signal', 'signal' + '_Forecast', 'signal_Residue', 'signal_Forecast_Lower_Bound',
'signal_Forecast_Upper_Bound']]
# print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(iHorizon_apply));
# In[3]:
test_fake_model_1_row( 2, 1)
# In[4]:
test_fake_model_1_row( 1, 2)
# In[5]:
test_fake_model_1_row( 2, 10)
# In[6]:
test_fake_model_1_row( 20, 10)
# In[7]:
test_fake_model_2_rows( 1, 4)
# In[8]:
test_fake_model_2_rows( 6, 2)
# In[9]:
test_fake_model_2_rows( 6, 1)
# In[10]:
test_fake_model_2_rows( 1 , 7)
| bsd-3-clause |
jlegendary/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
jk1/intellij-community | python/helpers/pydev/pydev_ipython/inputhook.py | 21 | 19415 | # coding: utf-8
"""
Inputhook management for GUI event loop integration.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import select
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_QT5 = 'qt5'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
GUI_GLUT = 'glut'
GUI_PYGLET = 'pyglet'
GUI_GTK3 = 'gtk3'
GUI_NONE = 'none' # i.e. disable
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def ignore_CTRL_C():
"""Ignore CTRL+C (not implemented)."""
pass
def allow_CTRL_C():
"""Take CTRL+C into account (not implemented)."""
pass
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
"""Manage PyOS_InputHook for different GUI toolkits.
    This class installs various hooks under ``PyOS_InputHook`` to handle
GUI event loop integration.
"""
def __init__(self):
self._return_control_callback = None
self._apps = {}
self._reset()
self.pyplot_imported = False
def _reset(self):
self._callback_pyfunctype = None
self._callback = None
self._current_gui = None
def set_return_control_callback(self, return_control_callback):
self._return_control_callback = return_control_callback
def get_return_control_callback(self):
return self._return_control_callback
def return_control(self):
return self._return_control_callback()
def get_inputhook(self):
return self._callback
def set_inputhook(self, callback):
"""Set inputhook to callback."""
        # In the context of the PyDev console we don't actually set PyOS_InputHook;
        # instead, this callback is run while waiting for input over xmlrpc.
self._callback = callback
def clear_inputhook(self, app=None):
"""Clear input hook.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`."""
self._reset()
def clear_app_refs(self, gui=None):
"""Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
            If None, clear all app references. If one of ('wx', 'qt4'), clear
the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
"""
if gui is None:
self._apps = {}
elif gui in self._apps:
del self._apps[gui]
def enable_wx(self, app=None):
"""Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
This methods sets the ``PyOS_InputHook`` for wxPython, which allows
the wxPython to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
import wx
from distutils.version import LooseVersion as V
wx_version = V(wx.__version__).version # @UndefinedVariable
if wx_version < [2, 8]:
raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__) # @UndefinedVariable
from pydev_ipython.inputhookwx import inputhook_wx
self.set_inputhook(inputhook_wx)
self._current_gui = GUI_WX
if app is None:
app = wx.GetApp() # @UndefinedVariable
if app is None:
app = wx.App(redirect=False, clearSigInt=False) # @UndefinedVariable
app._in_event_loop = True
self._apps[GUI_WX] = app
return app
def disable_wx(self):
"""Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_WX in self._apps:
self._apps[GUI_WX]._in_event_loop = False
self.clear_inputhook()
def enable_qt(self, app=None):
from pydev_ipython.qt_for_kernel import QT_API, QT_API_PYQT5
if QT_API == QT_API_PYQT5:
self.enable_qt5(app)
else:
self.enable_qt4(app)
def enable_qt4(self, app=None):
"""Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
This methods sets the PyOS_InputHook for PyQt4, which allows
the PyQt4 to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
from PyQt4 import QtCore
app = QtGui.QApplication(sys.argv)
"""
from pydev_ipython.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self, app)
self.set_inputhook(inputhook_qt4)
self._current_gui = GUI_QT4
app._in_event_loop = True
self._apps[GUI_QT4] = app
return app
def disable_qt4(self):
"""Disable event loop integration with PyQt4.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_QT4 in self._apps:
self._apps[GUI_QT4]._in_event_loop = False
self.clear_inputhook()
def enable_qt5(self, app=None):
from pydev_ipython.inputhookqt5 import create_inputhook_qt5
app, inputhook_qt5 = create_inputhook_qt5(self, app)
self.set_inputhook(inputhook_qt5)
self._current_gui = GUI_QT5
app._in_event_loop = True
self._apps[GUI_QT5] = app
return app
def disable_qt5(self):
if GUI_QT5 in self._apps:
self._apps[GUI_QT5]._in_event_loop = False
self.clear_inputhook()
def enable_gtk(self, app=None):
"""Enable event loop integration with PyGTK.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This methods sets the PyOS_InputHook for PyGTK, which allows
the PyGTK to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookgtk import create_inputhook_gtk
self.set_inputhook(create_inputhook_gtk(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_tk(self, app=None):
"""Enable event loop integration with Tk.
Parameters
----------
app : toplevel :class:`Tkinter.Tk` widget, optional.
Running toplevel widget to use. If not given, we probe Tk for an
existing one, and create a new one if none is found.
Notes
-----
If you have already created a :class:`Tkinter.Tk` object, the only
thing done by this method is to register with the
:class:`InputHookManager`, since creating that object automatically
sets ``PyOS_InputHook``.
"""
self._current_gui = GUI_TK
if app is None:
try:
import Tkinter as _TK
except:
# Python 3
import tkinter as _TK # @UnresolvedImport
app = _TK.Tk()
app.withdraw()
self._apps[GUI_TK] = app
from pydev_ipython.inputhooktk import create_inputhook_tk
self.set_inputhook(create_inputhook_tk(app))
return app
def disable_tk(self):
"""Disable event loop integration with Tkinter.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_glut(self, app=None):
""" Enable event loop integration with GLUT.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This methods sets the PyOS_InputHook for GLUT, which allows the GLUT to
integrate with terminal based applications like IPython. Due to GLUT
limitations, it is currently not possible to start the event loop
without first creating a window. You should thus not create another
window but use instead the created one. See 'gui-glut.py' in the
docs/examples/lib directory.
The default screen mode is set to:
glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
"""
import OpenGL.GLUT as glut # @UnresolvedImport
from pydev_ipython.inputhookglut import glut_display_mode, \
glut_close, glut_display, \
glut_idle, inputhook_glut
if GUI_GLUT not in self._apps:
glut.glutInit(sys.argv)
glut.glutInitDisplayMode(glut_display_mode)
# This is specific to freeglut
if bool(glut.glutSetOption):
glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS)
glut.glutCreateWindow(sys.argv[0])
glut.glutReshapeWindow(1, 1)
glut.glutHideWindow()
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
else:
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
self.set_inputhook(inputhook_glut)
self._current_gui = GUI_GLUT
self._apps[GUI_GLUT] = True
def disable_glut(self):
"""Disable event loop integration with glut.
This sets PyOS_InputHook to NULL and set the display function to a
dummy one and set the timer to a dummy timer that will be triggered
very far in the future.
"""
import OpenGL.GLUT as glut # @UnresolvedImport
from glut_support import glutMainLoopEvent # @UnresolvedImport
glut.glutHideWindow() # This is an event to be processed below
glutMainLoopEvent()
self.clear_inputhook()
def enable_pyglet(self, app=None):
"""Enable event loop integration with pyglet.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This methods sets the ``PyOS_InputHook`` for pyglet, which allows
pyglet to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookpyglet import inputhook_pyglet
self.set_inputhook(inputhook_pyglet)
self._current_gui = GUI_PYGLET
return app
def disable_pyglet(self):
"""Disable event loop integration with pyglet.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_gtk3(self, app=None):
"""Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This methods sets the PyOS_InputHook for Gtk3, which allows
the Gtk3 to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3
self.set_inputhook(create_inputhook_gtk3(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk3(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_mac(self, app=None):
""" Enable event loop integration with MacOSX.
        We call pyplot.pause, which updates and displays the active
        figure during the pause. This is not MacOSX-specific, but it lets us
        avoid input hooks in the native MacOSX backend.
        We also must not import pyplot until the user does, because the
        backend can only be chosen before pyplot is imported for the first
        time.
"""
def inputhook_mac(app=None):
if self.pyplot_imported:
pyplot = sys.modules['matplotlib.pyplot']
try:
pyplot.pause(0.01)
except:
pass
else:
if 'matplotlib.pyplot' in sys.modules:
self.pyplot_imported = True
self.set_inputhook(inputhook_mac)
self._current_gui = GUI_OSX
def disable_mac(self):
self.clear_inputhook()
def current_gui(self):
"""Return a string indicating the currently active GUI or None."""
return self._current_gui
inputhook_manager = InputHookManager()
enable_wx = inputhook_manager.enable_wx
disable_wx = inputhook_manager.disable_wx
enable_qt = inputhook_manager.enable_qt
enable_qt4 = inputhook_manager.enable_qt4
disable_qt4 = inputhook_manager.disable_qt4
enable_qt5 = inputhook_manager.enable_qt5
disable_qt5 = inputhook_manager.disable_qt5
enable_gtk = inputhook_manager.enable_gtk
disable_gtk = inputhook_manager.disable_gtk
enable_tk = inputhook_manager.enable_tk
disable_tk = inputhook_manager.disable_tk
enable_glut = inputhook_manager.enable_glut
disable_glut = inputhook_manager.disable_glut
enable_pyglet = inputhook_manager.enable_pyglet
disable_pyglet = inputhook_manager.disable_pyglet
enable_gtk3 = inputhook_manager.enable_gtk3
disable_gtk3 = inputhook_manager.disable_gtk3
enable_mac = inputhook_manager.enable_mac
disable_mac = inputhook_manager.disable_mac
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs
# We maintain this as stdin_ready so that the individual inputhooks
# can diverge as little as possible from their IPython sources
stdin_ready = inputhook_manager.return_control
set_return_control_callback = inputhook_manager.set_return_control_callback
get_return_control_callback = inputhook_manager.get_return_control_callback
get_inputhook = inputhook_manager.get_inputhook
# Convenience function to switch amongst them
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
This is just a utility wrapper around the methods of the InputHookManager
object.
Parameters
----------
gui : optional, string or None
If None (or 'none'), clears input hook, otherwise it must be one
of the recognized GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
"""
if get_return_control_callback() is None:
raise ValueError("A return_control_callback must be supplied as a reference before a gui can be enabled")
guis = {GUI_NONE: clear_inputhook,
GUI_OSX: enable_mac,
GUI_TK: enable_tk,
GUI_GTK: enable_gtk,
GUI_WX: enable_wx,
GUI_QT: enable_qt,
GUI_QT4: enable_qt4,
GUI_QT5: enable_qt5,
GUI_GLUT: enable_glut,
GUI_PYGLET: enable_pyglet,
GUI_GTK3: enable_gtk3,
}
try:
gui_hook = guis[gui]
except KeyError:
if gui is None or gui == '':
gui_hook = clear_inputhook
else:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, guis.keys())
raise ValueError(e)
return gui_hook(app)
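# Illustrative sketch (not part of the original module): enable_gui refuses to
# run until a return-control callback has been registered, so a typical caller
# installs one first and then selects a toolkit. The helper below is
# hypothetical and only documents that order of calls.
def _example_enable_tk_integration():
    # A trivial callback that always hands control back to the event loop.
    set_return_control_callback(lambda: True)
    return enable_gui(GUI_TK)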
__all__ = [
"GUI_WX",
"GUI_QT",
"GUI_QT4",
"GUI_QT5",
"GUI_GTK",
"GUI_TK",
"GUI_OSX",
"GUI_GLUT",
"GUI_PYGLET",
"GUI_GTK3",
"GUI_NONE",
"ignore_CTRL_C",
"allow_CTRL_C",
"InputHookManager",
"inputhook_manager",
"enable_wx",
"disable_wx",
"enable_qt",
"enable_qt4",
"disable_qt4",
"enable_qt5",
"disable_qt5",
"enable_gtk",
"disable_gtk",
"enable_tk",
"disable_tk",
"enable_glut",
"disable_glut",
"enable_pyglet",
"disable_pyglet",
"enable_gtk3",
"disable_gtk3",
"enable_mac",
"disable_mac",
"clear_inputhook",
"set_inputhook",
"current_gui",
"clear_app_refs",
"stdin_ready",
"set_return_control_callback",
"get_return_control_callback",
"get_inputhook",
"enable_gui"]
| apache-2.0 |
phdowling/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
    separator : string, optional
Separator string used when constructing new features for one-hot
coding.
    sparse : boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
    sort : boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
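# Illustrative sketch (not part of scikit-learn): _transform above assembles a
# CSR matrix incrementally from a growing value list plus "indices"/"indptr"
# buffers. A minimal standalone version of that construction pattern, using a
# hypothetical, already-known vocabulary mapping feature names to columns:
def _example_incremental_csr(samples, vocab):
    values, indices, indptr = [], array("i"), array("i", [0])
    for sample in samples:
        for feature, value in six.iteritems(sample):
            indices.append(vocab[feature])
            values.append(value)
        # One indptr entry per row marks where that row's entries end.
        indptr.append(len(indices))
    return sp.csr_matrix((values, indices, indptr),
                         shape=(len(indptr) - 1, len(vocab)))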
| bsd-3-clause |
Solid-Mechanics/matplotlib-4-abaqus | matplotlib/backends/backend_cairo.py | 4 | 17135 | """
A Cairo backend for matplotlib
Author: Steve Chaplin
Cairo is a vector graphics library with cross-device output support.
Features of Cairo:
* anti-aliasing
* alpha channel
* saves image files as PNG, PostScript, PDF
http://cairographics.org
Requires (in order, all available from Cairo website):
cairo, pycairo
Naming Conventions
* classes MixedUpperCase
  * variables lowerUpper
* functions underscore_separated
"""
from __future__ import division, print_function
import os, sys, warnings, gzip
import numpy as np
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
import cairo
except ImportError:
raise ImportError("Cairo backend requires that pycairo is installed.")
_version_required = (1,2,0)
if cairo.version_info < _version_required:
raise ImportError ("Pycairo %d.%d.%d is installed\n"
"Pycairo %d.%d.%d or later is required"
% (cairo.version_info + _version_required))
backend_version = cairo.version
del _version_required
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox, Affine2D
from matplotlib.font_manager import ttfFontProperty
_debug = False
#_debug = True
# Image::color_conv(format) for draw_image()
if sys.byteorder == 'little':
BYTE_FORMAT = 0 # BGRA
else:
BYTE_FORMAT = 1 # ARGB
class RendererCairo(RendererBase):
fontweights = {
100 : cairo.FONT_WEIGHT_NORMAL,
200 : cairo.FONT_WEIGHT_NORMAL,
300 : cairo.FONT_WEIGHT_NORMAL,
400 : cairo.FONT_WEIGHT_NORMAL,
500 : cairo.FONT_WEIGHT_NORMAL,
600 : cairo.FONT_WEIGHT_BOLD,
700 : cairo.FONT_WEIGHT_BOLD,
800 : cairo.FONT_WEIGHT_BOLD,
900 : cairo.FONT_WEIGHT_BOLD,
'ultralight' : cairo.FONT_WEIGHT_NORMAL,
'light' : cairo.FONT_WEIGHT_NORMAL,
'normal' : cairo.FONT_WEIGHT_NORMAL,
'medium' : cairo.FONT_WEIGHT_NORMAL,
'semibold' : cairo.FONT_WEIGHT_BOLD,
'bold' : cairo.FONT_WEIGHT_BOLD,
'heavy' : cairo.FONT_WEIGHT_BOLD,
'ultrabold' : cairo.FONT_WEIGHT_BOLD,
'black' : cairo.FONT_WEIGHT_BOLD,
}
fontangles = {
'italic' : cairo.FONT_SLANT_ITALIC,
'normal' : cairo.FONT_SLANT_NORMAL,
'oblique' : cairo.FONT_SLANT_OBLIQUE,
}
def __init__(self, dpi):
"""
"""
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
self.dpi = dpi
self.gc = GraphicsContextCairo (renderer=self)
self.text_ctx = cairo.Context (
cairo.ImageSurface (cairo.FORMAT_ARGB32,1,1))
self.mathtext_parser = MathTextParser('Cairo')
RendererBase.__init__(self)
def set_ctx_from_surface (self, surface):
self.gc.ctx = cairo.Context (surface)
def set_width_height(self, width, height):
self.width = width
self.height = height
self.matrix_flipy = cairo.Matrix (yy=-1, y0=self.height)
# use matrix_flipy for ALL rendering?
# - problem with text? - will need to switch matrix_flipy off, or do a
# font transform?
def _fill_and_stroke (self, ctx, fill_c, alpha, alpha_overrides):
if fill_c is not None:
ctx.save()
if len(fill_c) == 3 or alpha_overrides:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha)
else:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], fill_c[3])
ctx.fill_preserve()
ctx.restore()
ctx.stroke()
@staticmethod
def convert_path(ctx, path, transform):
for points, code in path.iter_segments(transform):
if code == Path.MOVETO:
ctx.move_to(*points)
elif code == Path.CLOSEPOLY:
ctx.close_path()
elif code == Path.LINETO:
ctx.line_to(*points)
elif code == Path.CURVE3:
ctx.curve_to(points[0], points[1],
points[0], points[1],
points[2], points[3])
elif code == Path.CURVE4:
ctx.curve_to(*points)
def draw_path(self, gc, path, transform, rgbFace=None):
if len(path.vertices) > 18980:
raise ValueError("The Cairo backend can not draw paths longer than 18980 points.")
ctx = gc.ctx
transform = transform + \
Affine2D().scale(1.0, -1.0).translate(0, self.height)
ctx.new_path()
self.convert_path(ctx, path, transform)
self._fill_and_stroke(ctx, rgbFace, gc.get_alpha(), gc.get_forced_alpha())
def draw_image(self, gc, x, y, im):
# bbox - not currently used
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
im.flipud_out()
rows, cols, buf = im.color_conv (BYTE_FORMAT)
surface = cairo.ImageSurface.create_for_data (
buf, cairo.FORMAT_ARGB32, cols, rows, cols*4)
ctx = gc.ctx
y = self.height - y - rows
ctx.save()
ctx.set_source_surface (surface, x, y)
ctx.paint()
ctx.restore()
im.flipud_out()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# Note: x,y are device/display coords, not user-coords, unlike other
# draw_* methods
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
ctx = gc.ctx
ctx.new_path()
ctx.move_to (x, y)
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
size = prop.get_size_in_points() * self.dpi / 72.0
ctx.save()
if angle:
ctx.rotate (-angle * np.pi / 180)
ctx.set_font_size (size)
if sys.version_info[0] < 3:
ctx.show_text(s.encode("utf-8"))
else:
ctx.show_text(s)
ctx.restore()
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
ctx = gc.ctx
width, height, descent, glyphs, rects = self.mathtext_parser.parse(
s, self.dpi, prop)
ctx.save()
ctx.translate(x, y)
if angle:
ctx.rotate (-angle * np.pi / 180)
for font, fontsize, s, ox, oy in glyphs:
ctx.new_path()
ctx.move_to(ox, oy)
fontProp = ttfFontProperty(font)
ctx.save()
ctx.select_font_face (fontProp.name,
self.fontangles [fontProp.style],
self.fontweights[fontProp.weight])
size = fontsize * self.dpi / 72.0
ctx.set_font_size(size)
if sys.version_info[0] < 3:
ctx.show_text(s.encode("utf-8"))
else:
ctx.show_text(s)
ctx.restore()
for ox, oy, w, h in rects:
ctx.new_path()
ctx.rectangle (ox, oy, w, h)
ctx.set_source_rgb (0, 0, 0)
ctx.fill_preserve()
ctx.restore()
def flipy(self):
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
return True
#return False # tried - all draw objects ok except text (and images?)
# which comes out mirrored!
def get_canvas_width_height(self):
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
if ismath:
width, height, descent, fonts, used_characters = self.mathtext_parser.parse(
s, self.dpi, prop)
return width, height, descent
ctx = self.text_ctx
ctx.save()
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
# Cairo (says it) uses 1/96 inch user space units, ref: cairo_gstate.c
# but if /96.0 is used the font is too small
size = prop.get_size_in_points() * self.dpi / 72.0
# problem - scale remembers last setting and font can become
# enormous causing program to crash
# save/restore prevents the problem
ctx.set_font_size (size)
y_bearing, w, h = ctx.text_extents (s)[1:4]
ctx.restore()
return w, h, h + y_bearing
def new_gc(self):
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
self.gc.ctx.save()
self.gc._alpha = 1.0
self.gc._forced_alpha = False # if True, _alpha overrides A from RGBA
return self.gc
def points_to_pixels(self, points):
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
return points/72.0 * self.dpi
class GraphicsContextCairo(GraphicsContextBase):
_joind = {
'bevel' : cairo.LINE_JOIN_BEVEL,
'miter' : cairo.LINE_JOIN_MITER,
'round' : cairo.LINE_JOIN_ROUND,
}
_capd = {
'butt' : cairo.LINE_CAP_BUTT,
'projecting' : cairo.LINE_CAP_SQUARE,
'round' : cairo.LINE_CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
def restore(self):
self.ctx.restore()
def set_alpha(self, alpha):
GraphicsContextBase.set_alpha(self, alpha)
_alpha = self.get_alpha()
rgb = self._rgb
if self.get_forced_alpha():
self.ctx.set_source_rgba (rgb[0], rgb[1], rgb[2], _alpha)
else:
self.ctx.set_source_rgba (rgb[0], rgb[1], rgb[2], rgb[3])
#def set_antialiased(self, b):
# enable/disable anti-aliasing is not (yet) supported by Cairo
def set_capstyle(self, cs):
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
self.ctx.set_line_cap (self._capd[cs])
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
if not rectangle: return
x,y,w,h = rectangle.bounds
# pixel-aligned clip-regions are faster
x,y,w,h = round(x), round(y), round(w), round(h)
ctx = self.ctx
ctx.new_path()
ctx.rectangle (x, self.renderer.height - h - y, w, h)
ctx.clip ()
def set_clip_path(self, path):
if not path: return
tpath, affine = path.get_transformed_path_and_affine()
ctx = self.ctx
ctx.new_path()
affine = affine + Affine2D().scale(1.0, -1.0).translate(0.0, self.renderer.height)
RendererCairo.convert_path(ctx, tpath, affine)
ctx.clip()
def set_dashes(self, offset, dashes):
self._dashes = offset, dashes
        if dashes is None:
self.ctx.set_dash([], 0) # switch dashes off
else:
self.ctx.set_dash (
self.renderer.points_to_pixels (np.asarray(dashes)), offset)
def set_foreground(self, fg, isRGBA=None):
GraphicsContextBase.set_foreground(self, fg, isRGBA)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_joinstyle(self, js):
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
self.ctx.set_line_join(self._joind[js])
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
self._linewidth = w
self.ctx.set_line_width (self.renderer.points_to_pixels(w))
def new_figure_manager(num, *args, **kwargs): # called by backends/__init__.py
"""
Create a new figure manager instance
"""
if _debug: print('%s()' % (_fn_name()))
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasCairo(figure)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasCairo (FigureCanvasBase):
def print_png(self, fobj, *args, **kwargs):
width, height = self.get_width_height()
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width, height)
surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, width, height)
renderer.set_ctx_from_surface (surface)
self.figure.draw (renderer)
surface.write_to_png (fobj)
def print_pdf(self, fobj, *args, **kwargs):
return self._save(fobj, 'pdf', *args, **kwargs)
def print_ps(self, fobj, *args, **kwargs):
return self._save(fobj, 'ps', *args, **kwargs)
def print_svg(self, fobj, *args, **kwargs):
return self._save(fobj, 'svg', *args, **kwargs)
def print_svgz(self, fobj, *args, **kwargs):
return self._save(fobj, 'svgz', *args, **kwargs)
def _save (self, fo, format, **kwargs):
# save PDF/PS/SVG
orientation = kwargs.get('orientation', 'portrait')
dpi = 72
self.figure.dpi = dpi
w_in, h_in = self.figure.get_size_inches()
width_in_points, height_in_points = w_in * dpi, h_in * dpi
if orientation == 'landscape':
width_in_points, height_in_points = (height_in_points,
width_in_points)
if format == 'ps':
if not cairo.HAS_PS_SURFACE:
raise RuntimeError ('cairo has not been compiled with PS '
'support enabled')
surface = cairo.PSSurface (fo, width_in_points, height_in_points)
elif format == 'pdf':
if not cairo.HAS_PDF_SURFACE:
raise RuntimeError ('cairo has not been compiled with PDF '
'support enabled')
surface = cairo.PDFSurface (fo, width_in_points, height_in_points)
elif format in ('svg', 'svgz'):
if not cairo.HAS_SVG_SURFACE:
raise RuntimeError ('cairo has not been compiled with SVG '
'support enabled')
if format == 'svgz':
filename = fo
if is_string_like(fo):
fo = open(fo, 'wb')
close = True
else:
close = False
try:
fo = gzip.GzipFile(None, 'wb', fileobj=fo)
finally:
if close:
fo.close()
surface = cairo.SVGSurface (fo, width_in_points, height_in_points)
else:
warnings.warn ("unknown format: %s" % format)
return
# surface.set_dpi() can be used
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width_in_points, height_in_points)
renderer.set_ctx_from_surface (surface)
ctx = renderer.gc.ctx
if orientation == 'landscape':
ctx.rotate (np.pi/2)
ctx.translate (0, -height_in_points)
# cairo/src/cairo_ps_surface.c
# '%%Orientation: Portrait' is always written to the file header
# '%%Orientation: Landscape' would possibly cause problems
# since some printers would rotate again ?
# TODO:
# add portrait/landscape checkbox to FileChooser
self.figure.draw (renderer)
show_fig_border = False # for testing figure orientation and scaling
if show_fig_border:
ctx.new_path()
ctx.rectangle(0, 0, width_in_points, height_in_points)
ctx.set_line_width(4.0)
ctx.set_source_rgb(1,0,0)
ctx.stroke()
ctx.move_to(30,30)
ctx.select_font_face ('sans-serif')
ctx.set_font_size(20)
ctx.show_text('Origin corner')
ctx.show_page()
surface.finish()
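# Illustrative sketch: how a figure might be rendered to PNG with this canvas. The
# _example_render_png helper and the output filename are illustrative only; assumes
# pycairo is importable (enforced at the top of this module).
def _example_render_png(path='cairo_demo.png'):
    fig = Figure(figsize=(4, 3), dpi=100)
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2], [2, 1, 3])
    FigureCanvasCairo(fig).print_png(path)  # rasterizes via cairo.ImageSurface, then write_to_png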
| mit |
rc500/ardrone_archive_aarons_laptop | rjw57-playground/experiments/experiments.py | 1 | 4805 | import json, os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.image as mpimg
import matplotlib.animation as mpanim
def load_log(filename=None):
if filename is None:
filename = '/data/rjw57/ardrone/logs/rjw57_office/rjw57_office_log.txt'
log = []
with open(filename) as log_file:
for l in log_file:
log.append(json.loads(l))
return log
def load_drone_states_from_log(filename=None):
"""Return an array where each row is one record from the log. Each row records:
* 0, The time (in seconds) of the data capture
* 1, The theta orientation angle in radians
* 2, The phi orientation angle in radians
* 3, The psi orientation angle in radians
* 4, The altitude from the altimeter in millimetres
* 5, The linear x velocity in millimetres/second (guessed units)
* 6, The linear y velocity in millimetres/second (guessed units)
* 7, The linear z velocity in millimetres/second (guessed units)
* 8, The (absolute) change in theta since the last frame in radians
* 9, The (absolute) change in phi since the last frame in radians
* 10, The (absolute) change in psi since the last frame in radians
* 11, The time (in seconds) of this log message which can be used to synchronise with a video frame
"""
if filename is None:
filename = '/data/rjw57/ardrone/logs/rjw57_office/rjw57_office_log.txt'
log = []
last_stamp = None
with open(filename) as log_file:
for l in log_file:
record = json.loads(l)
if record['type'] == 'state_from_drone' and record['what']['type'] == 'vision':
vision = record['what']
capture_stamp = vision['time_capture']
# Skip duplicate records
if last_stamp is not None and last_stamp == capture_stamp:
continue
last_stamp = capture_stamp
capture_seconds = capture_stamp >> 21
capture_useconds = capture_stamp & ((1<<21) - 1)
capture_time = (1e-6 * capture_useconds) + capture_seconds
log.append([
capture_time,
vision['theta_capture'], vision['phi_capture'], vision['psi_capture'], vision['altitude_capture'],
vision['body_v']['x'], vision['body_v']['y'], vision['body_v']['z'],
vision['delta_theta'], vision['delta_phi'], vision['delta_psi'],
record['when']
])
return np.array(log)
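# Illustrative sketch: load a state log and plot the integrated 3d path with the helpers
# in this module. The _example_* name is illustrative; assumes the default log path above
# (or pass your own filename).
def _example_plot_logged_path(filename=None):
    log = load_drone_states_from_log(filename)
    plot_path(log)
    plt.show()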
def load_video_filenames_from_log(filename=None):
"""Return a list of (timestamp, filename) pairs for the video filenames."""
if filename is None:
filename = '/data/rjw57/ardrone/logs/rjw57_office/rjw57_office_log.txt'
file_base = os.path.dirname(filename)
log = []
last_stamp = None
with open(filename) as log_file:
for l in log_file:
record = json.loads(l)
if record['type'] == 'frame_from_drone':
log.append((record['when'], os.path.join(file_base, record['what'])))
return log
def gen_animation(frames, log):
fig = plt.figure()
state = {'start_hint': 0}
fps = len(frames) / (frames[-1][0] - frames[0][0])
def anim_func(frame_index, state):
print('Frame %i/%i' % (1+frame_index, len(frames)))
fig.set_dpi(100)
fig.set_figwidth(1280.0/fig.get_dpi())
fig.set_figheight(720.0/fig.get_dpi())
state['start_hint'] = plot_video_frame(frames, log, frame_index, start_hint=state['start_hint'], fig=fig)
anim = mpanim.FuncAnimation(fig, anim_func, range(len(frames)), init_func=fig.clear, fargs=(state,))
anim.save('anim.mp4', fps=fps, clear_temp=False, frame_prefix='gen_anim_temp_')
def plot_video_frame(frames, log, frame_idx, start_hint=0, png_file=None, fig=None):
frame_when, frame_file = frames[frame_idx]
idx = start_hint
while idx < len(log) and log[idx, 11] < frame_when:
idx += 1
if idx >= len(log):
raise IndexError('frame is beyond end of log')
im = mpimg.imread(frame_file)
if fig is None:
fig = plt.figure()
fig.clear()
ax = fig.add_axes([0, 0, 0.8, 1], projection='3d')
plot_path(log, ax=ax, highlight_idx=idx)
ax2 = fig.add_axes([0.725, 0.35, 0.3, 0.3])
ax2.axison = False
plt.imshow(im, axes=ax2, origin='lower', aspect='equal')
plt.draw()
if png_file is not None:
plt.savefig(png_file)
return idx
def plot_path(log, ax=None, highlight_idx=None):
"""Pass this a log array as provided by load_drone_states_from_log()."""
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
delta_t = np.diff(log[:,0])
x = np.cumsum(log[:-1,5] * delta_t)
y = np.cumsum(log[:-1,6] * delta_t)
#z = np.cumsum(log[:-1,7] * delta_t)
z = log[:-1,4]
ax.plot(x, y, z)
if highlight_idx is not None:
hx = [x[highlight_idx],]
hy = [y[highlight_idx],]
hz = [z[highlight_idx],]
ax.plot(hx, hy , hz, 'r.', ms=10)
| apache-2.0 |
andaag/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100.000 samples (1.000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
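# Illustrative sketch: the Brier score reported in the legends is the mean squared error
# between predicted probabilities and the binary outcomes (the values below are made up).
def _example_brier_score():
    return brier_score_loss([0, 1, 1], [0.1, 0.8, 0.4])  # (0.01 + 0.04 + 0.36) / 3 ~ 0.137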
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tests/series/test_repr.py | 8 | 5953 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, date_range)
from pandas.core.index import MultiIndex
from pandas.compat import StringIO, lrange, range, u
from pandas import compat
import pandas.util.testing as tm
from .common import TestData
class TestSeriesRepr(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(lrange(0, len(index)), index=index, name='sth')
expected = ["first second", "foo one 0",
" two 1", " three 2",
"bar one 3", " two 4",
"baz two 5", " three 6",
"qux one 7", " two 8",
" three 9", "Name: sth, dtype: int64"]
expected = "\n".join(expected)
self.assertEqual(repr(s), expected)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assertIn("Name: test", repr(s))
s.name = None
self.assertNotIn("Name:", repr(s))
# test big series (diff code path)
s = Series(lrange(0, 1000))
s.name = "test"
self.assertIn("Name: test", repr(s))
s.name = None
self.assertNotIn("Name:", repr(s))
s = Series(index=date_range('20010101', '20020101'), name='test')
self.assertIn("Name: test", repr(s))
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# with Nones
ots = self.ts.astype('O')
ots[::2] = None
repr(ots)
# various names
for name in ['', 1, 1.2, 'foo', u('\u03B1\u03B2\u03B3'),
'loooooooooooooooooooooooooooooooooooooooooooooooooooong',
('foo', 'bar', 'baz'), (1, 2), ('foo', 1, 2.3),
(u('\u03B1'), u('\u03B2'), u('\u03B3')),
(u('\u03B1'), 'bar')]:
self.series.name = name
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
# 0 as name
ser = Series(np.random.randn(100), name=0)
rep_str = repr(ser)
self.assertIn("Name: 0", rep_str)
# tidy repr
ser = Series(np.random.randn(1001), name=0)
rep_str = repr(ser)
self.assertIn("Name: 0", rep_str)
ser = Series(["a\n\r\tb"], name="a\n\r\td", index=["a\n\r\tf"])
self.assertFalse("\t" in repr(ser))
self.assertFalse("\r" in repr(ser))
self.assertFalse("a\n" in repr(ser))
# with empty series (#4651)
s = Series([], dtype=np.int64, name='foo')
self.assertEqual(repr(s), 'Series([], Name: foo, dtype: int64)')
s = Series([], dtype=np.int64, name=None)
self.assertEqual(repr(s), 'Series([], dtype: int64)')
def test_tidy_repr(self):
a = Series([u("\u05d0")] * 1000)
a.name = 'title1'
repr(a) # should not raise exception
def test_repr_bool_fails(self):
s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])
import sys
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
# it works (with no Cython exception barf)!
repr(s)
finally:
sys.stderr = tmp
self.assertEqual(buf.getvalue(), '')
def test_repr_name_iterable_indexable(self):
s = Series([1, 2, 3], name=np.int64(3))
# it works!
repr(s)
s.name = (u("\u05d0"), ) * 2
repr(s)
def test_repr_should_return_str(self):
# http://docs.python.org/py3k/reference/datamodel.html#object.__repr__
# http://docs.python.org/reference/datamodel.html#object.__repr__
# ...The return value must be a string object.
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
df = Series(data, index=index1)
self.assertTrue(type(df.__repr__() == str)) # both py2 / 3
def test_repr_max_rows(self):
# GH 6863
with pd.option_context('max_rows', None):
str(Series(range(1001))) # should not raise exception
def test_unicode_string_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
str(df)
else:
compat.text_type(df)
def test_bytestring_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
bytes(df)
else:
str(df)
def test_timeseries_repr_object_dtype(self):
index = Index([datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)], dtype=object)
ts = Series(np.random.randn(len(index)), index)
repr(ts)
ts = tm.makeTimeSeries(1000)
self.assertTrue(repr(ts).splitlines()[-1].startswith('Freq:'))
ts2 = ts.ix[np.random.randint(0, len(ts) - 1, 400)]
repr(ts2).splitlines()[-1]
| mit |
exepulveda/swfc | python/clustering_swfc_bm_clusters.py | 1 | 5823 | '''
This script perform WFC and SWFC for many clusters and calculate DB and Silhouette indices
'''
import sys
import collections
import sys
import random
sys.path += ['..']
import clusteringlib as cl
import numpy as np
import scipy.stats
import clustering_ga
from scipy.spatial.distance import pdist
from sklearn.cluster import KMeans
from cluster_utils import fix_weights
from graph_labeling import graph_cut, make_neighbourhood
from scipy.spatial import cKDTree
CHECK_VALID = False
from case_study_bm import attributes,setup_case_study_ore,setup_case_study_all,setup_distances
if __name__ == "__main__":
locations,data,min_values,max_values,scale,var_types,categories = setup_case_study_ore(a=0.999)
N,ND = data.shape
print(N,ND)
#print(min_values)
#print(max_values)
#print(scale)
seed = 1634120
#if args.target:
# targets = np.asfortranarray(np.percentile(values[:,-1], [25,50,75]),dtype=np.float32)
# var_types[-1] = 2
m = 2.0
verbose=0
lambda_value = 0.25
filename_template = "../results/bm_{tag}_wfc_{nc}.csv"
ngen=200
npop=200
cxpb=0.8
mutpb=0.4
stop_after=40
targets = np.asfortranarray(np.percentile(data[:,-1], [15,50,85]),dtype=np.float32)
var_types[-1] = 2
force = (ND-1,0.15) #weight to target 15%
knn = 15
kdtree = cKDTree(locations)
neighbourhood,distances = make_neighbourhood(kdtree,locations,knn,max_distance=2.0)
distances = np.array(distances)
for NC in range(2,11):
np.random.seed(seed)
random.seed(seed)
cl.utils.set_seed(seed)
setup_distances(scale,var_types,use_cat=True,targets=targets)
#initial centroids
kmeans_method = KMeans(n_clusters=NC,random_state=seed)
kmeans_method.fit(data)
current_centroids = np.asfortranarray(np.empty((NC,ND)))
current_centroids[:,:] = kmeans_method.cluster_centers_
#initial weights are uniform
weights = np.asfortranarray(np.ones((NC,ND),dtype=np.float32)/ ND)
#if args.target:
# for c in range(NC):
# weights[c,:] = fix_weights(weights[c,:],force=force)
for k in range(20):
best_centroids,best_u,best_energy_centroids,best_jm,current_temperature,evals = clustering_ga.optimize_centroids(
data,
current_centroids,
weights,
m,
lambda_value,
var_types,
{},
ngen=ngen,npop=npop,cxpb=cxpb,mutpb=mutpb,stop_after=stop_after,
min_values = min_values,
max_values = max_values,
verbose=verbose)
#print("centroids",best_centroids,best_energy_centroids,"jm",best_jm)
u = best_u
N,NC = u.shape
clusters = np.argmax(u,axis=1)
centroids = best_centroids.copy()
#print("centroids",centroids)
#print("u",u)
#counter = collections.Counter(clusters)
#print("number of clusters: ",counter.most_common())
best_weights,best_u,best_energy_weights,evals = clustering_ga.optimize_weights(
data,
centroids,
weights,
m,
lambda_value,
ngen=ngen,npop=npop,cxpb=cxpb,mutpb=mutpb,stop_after=stop_after,
force=force,
verbose=verbose)
clusters = np.argmax(best_u,axis=1)
weights = best_weights.copy()
current_centroids = best_centroids.copy()
#print(lambda_value,k,best_energy_centroids,best_energy_weights,"jm",best_jm)
print('iteration',k,best_energy_centroids,best_energy_weights)
#save data
new_data = np.c_[locations,clusters]
np.savetxt(filename_template.format(tag='clusters',nc=NC),new_data,delimiter=",",fmt="%.4f")
np.savetxt(filename_template.format(tag='centroids',nc=NC),current_centroids,delimiter=",",fmt="%.4f")
np.savetxt(filename_template.format(tag='u',nc=NC),best_u,delimiter=",",fmt="%.4f")
np.savetxt(filename_template.format(tag='weights',nc=NC),best_weights,delimiter=",",fmt="%.4f")
if abs(best_energy_centroids - best_energy_weights) < 1e-2:
break
centroid = np.asfortranarray(best_centroids,dtype=np.float32)
weights = np.asfortranarray(best_weights,dtype=np.float32)
clusters = np.asfortranarray(clusters,dtype=np.int8)
ret_fc = cl.clustering.dbi_index(centroid,data,clusters,weights)
ret_sill= cl.clustering.silhouette_index(data,clusters,weights)
print("WFC: DB,Sil:",NC,ret_fc,ret_sill,sep=',')
#Spatial correction
clusters_graph = np.int32(graph_cut(locations,neighbourhood,best_u,unary_constant=70.0,smooth_constant=15.0,verbose=0))
centroids_F = np.asfortranarray(np.empty((NC,ND)),dtype=np.float32)
#calculate centroids back
for k in range(NC):
indices = np.where(clusters_graph == k)[0]
centroids_F[k,:] = np.mean(data[indices,:],axis=0)
clusters = np.asfortranarray(clusters_graph,dtype=np.int8)
ret_swfc_dbi = cl.clustering.dbi_index(centroids_F,data,clusters,weights)
ret_swfc_sill= cl.clustering.silhouette_index(data,clusters,weights)
print("SWFC: DB,Sil:",NC,ret_swfc_dbi,ret_swfc_sill,sep=',')
cl.distances.reset()
| gpl-3.0 |
agartland/utils | seqdistance_old.py | 1 | 26727 |
from functools import *
import itertools
import operator
from Bio import SeqIO, pairwise2
from Bio.SubsMat.MatrixInfo import blosum90, ident, blosum62
from copy import deepcopy
import sys
import numpy as np
import numba as nb
import re
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA, KernelPCA
from sklearn import cluster
from sklearn.manifold import Isomap
import tsne
import pytsne
__all__ = ['BADAA',
'AALPHABET',
'AA2CODE',
'CODE2AA',
'isvalidpeptide',
'removeBadAA',
'hamming_distance',
'trunc_hamming',
'dichot_hamming',
'seq2vec',
'nanGapScores',
'nanZeroGapScores',
'binGapScores',
'blosum90GapScores',
'binarySubst',
'addGapScores',
'seq_similarity',
'seq_distance',
'seq_similarity_old',
'unalign_similarity',
'_test_seq_similarity',
'calcDistanceMatrix',
'calcDistanceRectangle',
'blosum90',
'ident',
'blosum62',
'embedDistanceMatrix']
BADAA = '-*BX#Z'
FULL_AALPHABET = 'ABCDEFGHIKLMNPQRSTVWXYZ-'
AALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA2CODE = {aa:i for i, aa in enumerate(FULL_AALPHABET)}
AA2CODE.update({'-':23})
CODE2AA = {i:aa for i, aa in enumerate(FULL_AALPHABET)}
CODE2AA.update({23:'-'})
def subst2mat(subst,alphabet = FULL_AALPHABET):
"""Converts a substitution dictionary
(like those from Bio) into a numpy 2d substitution matrix"""
mat = np.nan * np.zeros((len(alphabet), len(alphabet)), dtype = np.float64)
for (aa1, aa2), v in list(subst.items()):
mat[alphabet.index(aa1), alphabet.index(aa2)] = v
return mat
"""Many different ways of handling gaps. Remember that these are SIMILARITY scores"""
nanGapScores={('-', '-'):np.nan,
('-', 'X'):np.nan,
('X', '-'):np.nan}
nanZeroGapScores={('-', '-'):np.nan,
('-', 'X'):0,
('X', '-'):0}
"""Default for addGapScores()"""
binGapScores={('-', '-'):1,
('-', 'X'):0,
('X', '-'):0}
"""Arbitrary/reasonable values (extremes for blosum90 I think)"""
blosum90GapScores={('-', '-'):5,
('-', 'X'):-11,
('X', '-'):-11}
binarySubst = {(aa1, aa2):np.float64(aa1==aa2) for aa1, aa2 in itertools.product(FULL_AALPHABET, FULL_AALPHABET)}
identMat = subst2mat(ident)
blosum90Mat = subst2mat(blosum90)
blosum62Mat = subst2mat(blosum62)
binaryMat = subst2mat(binarySubst)
def isvalidpeptide(mer,badaa=None):
"""Test if the mer contains an BAD amino acids in global BADAA
typically -*BX#Z"""
if badaa is None:
badaa = BADAA
if not mer is None:
return not re.search('[%s]' % badaa, mer)
else:
return False
def removeBadAA(mer,badaa=None):
"""Remove badaa amino acids from the mer, default badaa is -*BX#Z"""
if badaa is None:
badaa = BADAA
if not mer is None:
return re.sub('[%s]' % badaa, '', mer)
else:
return mer
def hamming_distance(str1, str2, noConvert = False, **kwargs):
"""Hamming distance between str1 and str2.
Only finds distance over the length of the shorter string.
**kwargs are so this can be plugged in place of a seq_distance() metric"""
if noConvert:
return np.sum([i for i in map(operator.__ne__, str1, str2)])
if isinstance(str1, str):
str1 = string2byte(str1)
if isinstance(str2, str):
str2 = string2byte(str2)
return nb_hamming_distance(str1, str2)
def aamismatch_distance(seq1,seq2, **kwargs):
if isinstance(seq1, str):
seq1 = seq2vec(seq1)
if isinstance(seq2, str):
seq2 = seq2vec(seq2)
dist12 = nb_seq_similarity(seq1, seq2, substMat = binaryMat, normed = False, asDistance = True)
return dist12
def string2byte(s):
"""Convert string to byte array since numba can't handle strings"""
if isinstance(s, str):
s = np.array(s)
dtype = s.dtype
if dtype is np.dtype('byte'):
return s # it's already a byte array
shape = list(s.shape)
n = dtype.itemsize
shape.append(n)
return s.ravel().view(dtype='byte').reshape(shape)
def seq2vec(seq):
"""Convert AA sequence into numpy vector of integers for fast comparison"""
vec = np.zeros(len(seq), dtype = np.int8)
for aai, aa in enumerate(seq):
vec[aai] = AA2CODE[aa]
return vec
def seqs2mat(seqs):
"""Convert a collection of AA sequences into a
numpy matrix of integers for fast comparison.
Requires all seqs to have the same length."""
L1 = len(seqs[0])
mat = np.zeros((len(seqs), L1), dtype = np.int8)
for si, s in enumerate(seqs):
assert L1 == len(s), "All sequences must have the same length: L1 = %d, but L%d = %d" % (L1, si, len(s))
for aai, aa in enumerate(s):
mat[si, aai] = AA2CODE[aa]
return mat
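# Illustrative sketch: seq2vec/seqs2mat map residues onto the int8 codes in AA2CODE,
# which is the representation the numba kernels below operate on. The _example_* helper
# is illustrative only.
def _example_seq_codes():
    return seq2vec('AK-A'), seqs2mat(['AKAA', 'AKKA'])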
@nb.jit(nb.int8(nb.char[:], nb.char[:]), nopython = True)
def nb_hamming_distance(str1, str2):
tot = 0
for s1, s2 in zip(str1, str2):
if s1 != s2:
tot += 1
return tot
def trunc_hamming(seq1,seq2,maxDist=2,**kwargs):
"""Truncated hamming distance
d = hamming() if d<maxDist else d = maxDist"""
d = hamming_distance(seq1, seq2)
return maxDist if d >= maxDist else d
def dichot_hamming(seq1,seq2,mmTolerance=1,**kwargs):
"""Dichotamized hamming distance.
hamming <= mmTolerance is 0 and all others are 1"""
d = hamming_distance(seq1, seq2)
return 1 if d > mmTolerance else 0
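# Illustrative sketch: the truncated and dichotomized variants above simply cap or
# binarize the plain hamming distance (expected values noted in the comments).
def _example_hamming_variants():
    d = hamming_distance('AKAA', 'AKKK', noConvert=True)  # 2 mismatches
    dt = trunc_hamming('AKAA', 'AKKK', maxDist=2)         # capped at maxDist -> 2
    db = dichot_hamming('AKAA', 'AKKK', mmTolerance=1)    # more than mmTolerance mismatches -> 1
    return d, dt, db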
def addGapScores(subst, gapScores = None, minScorePenalty = False, returnMat = False):
"""Add gap similarity scores for each AA (Could be done once for a set of sequences to improve speed)
if gapScores is None then it will use defaults:
gapScores={('-','-'):1,
('-','X'):0,
('X','-'):0}
OR for blosum90 default is:
blosum90GapScores={('-','-'):5,
('-','X'):-11,
('X','-'):-11}
"""
if minScorePenalty:
gapScores = {('-', '-') : 1,
('-', 'X') : np.min(list(subst.values())),
('X', '-') : np.min(list(subst.values()))}
elif gapScores is None:
if subst is binarySubst:
print('Using default binGapScores for binarySubst')
gapScores = binGapScores
elif subst is blosum90:
print('Using default blosum90 gap scores')
gapScores = blosum90GapScores
else:
raise Exception('Cannot determine which gap scores to use!')
su = deepcopy(subst)
uAA = np.unique([k[0] for k in list(subst.keys())])
su.update({('-', aa) : gapScores[('-', 'X')] for aa in uAA})
su.update({(aa, '-') : gapScores[('X', '-')] for aa in uAA})
su.update({('-', '-') : gapScores[('-', '-')]})
if returnMat:
return subst2mat(su)
return su
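# Illustrative sketch: build a gap-aware binary substitution dictionary and score an
# aligned pair containing a gap; seq_similarity (defined below) accepts the dict directly.
def _example_gap_similarity():
    su = addGapScores(binarySubst, binGapScores)  # adds ('-', aa), (aa, '-') and ('-', '-') entries
    return seq_similarity('AK-A', 'AKKA', subst=su, normed=False)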
#@nb.jit(nb.float64(nb.int8[:],nb.int8[:],nb.float64[:,:],nb.boolean,nb.boolean), nopython = True)
@nb.jit(nopython = True)
def nb_seq_similarity(seq1, seq2, substMat, normed, asDistance):
"""Computes sequence similarity based on the substitution matrix."""
if seq1.shape[0] != seq2.shape[0]:
raise IndexError
if normed or asDistance:
sim12 = 0.
siteN = 0.
sim11 = 0.
sim22 = 0.
for i in range(seq1.shape[0]):
cur12 = substMat[seq1[i], seq2[i]]
cur11 = substMat[seq1[i], seq1[i]]
cur22 = substMat[seq2[i], seq2[i]]
if not np.isnan(cur12):
sim12 += cur12
siteN += 1.
if not np.isnan(cur11):
sim11 += cur11
if not np.isnan(cur22):
sim22 += cur22
sim12 = 2*sim12/((sim11/siteN) + (sim22/siteN))
else:
sim12 = 0.
siteN = 0.
for i in range(seq1.shape[0]):
if not np.isnan(substMat[seq1[i], seq2[i]]):
sim12 += substMat[seq1[i], seq2[i]]
siteN += 1.
if asDistance:
if normed:
sim12 = (siteN - sim12)/siteN
else:
sim12 = siteN - sim12
return sim12
def np_seq_similarity(seq1, seq2, substMat, normed, asDistance):
"""Computes sequence similarity based on the substitution matrix."""
if seq1.shape[0] != seq2.shape[0]:
raise IndexError("Sequences must be the same length (%d != %d)." % (seq1.shape[0], seq2.shape[0]))
"""Similarity between seq1 and seq2 using the substitution matrix subst"""
sim12 = substMat[seq1, seq2]
if normed or asDistance:
siteN = (~np.isnan(sim12)).sum()
sim11 = np.nansum(substMat[seq1, seq1])/siteN
        sim22 = np.nansum(substMat[seq2, seq2])/siteN
tot12 = np.nansum(2*sim12)/(sim11+sim22)
else:
tot12 = np.nansum(sim12)
if asDistance:
"""Distance between seq1 and seq2 using the substitution matrix subst
because seq_similarity returns a total similarity with max of siteN (not per site), we use
d = siteN - sim
which is a total normed distance, not a per site distance"""
if normed:
tot12 = (siteN - tot12)/siteN
else:
tot12 = siteN - tot12
return tot12
def seq_similarity(seq1, seq2, subst = None, normed = True, asDistance = False):
"""Compare two sequences and return the similarity of one and the other
If the seqs are of different length then it raises an exception
FOR HIGHLY DIVERGENT SEQUENCES THIS NORMALIZATION DOES NOT GET TO [0,1] BECAUSE OF EXCESS NEGATIVE SCORES!
Consider normalizing the matrix first by adding the min() so that min = 0 (but do not normalize per comparison)
Return a nansum of site-wise similarities between two sequences based on a substitution matrix
[0, siteN] where siteN ignores nan similarities which may depend on gaps
    sim12 = nansum(2*sim12/(nanmean(sim11) + nanmean(sim22)))
Optionally specify normed = False:
[0, total raw similarity]
This returns a score [0, 1] for binary and blosum based similarities
otherwise its just the sum of the raw score out of the subst matrix"""
if subst is None:
print('Using default binarySubst matrix with binGaps for seq_similarity')
subst = addGapScores(binarySubst, binGapScores)
if isinstance(subst, dict):
subst = subst2mat(subst)
if isinstance(seq1, str):
seq1 = seq2vec(seq1)
if isinstance(seq2, str):
seq2 = seq2vec(seq2)
result = np_seq_similarity(seq1, seq2, substMat = subst, normed = normed, asDistance = asDistance)
return result
def seq_similarity_old(seq1,seq2,subst=None,normed=True):
"""Compare two sequences and return the similarity of one and the other
If the seqs are of different length then it raises an exception
FOR HIGHLY DIVERGENT SEQUENCES THIS NORMALIZATION DOES NOT GET TO [0,1] BECAUSE OF EXCESS NEGATIVE SCORES!
Consider normalizing the matrix first by adding the min() so that min = 0 (but do not normalize per comparison)
Return a nansum of site-wise similarities between two sequences based on a substitution matrix
[0, siteN] where siteN ignores nan similarities which may depend on gaps
    sim12 = nansum(2*sim12/(nanmean(sim11) + nanmean(sim22)))
Optionally specify normed = False:
[0, total raw similarity]
This returns a score [0, 1] for binary and blosum based similarities
otherwise its just the sum of the raw score out of the subst matrix
For a hamming similarity when there are no gaps use subst=binarySubst
and performance is optimized underneath using hamming_distance"""
assert len(seq1) == len(seq2), "len of seq1 (%d) and seq2 (%d) are different" % (len(seq1), len(seq2))
if subst is binarySubst:
dist = hamming_distance(seq1, seq2)
sim = len(seq1) - dist
if normed:
sim = sim / len(seq1)
return sim
if subst is None:
print('Using default binarySubst matrix with binGaps for seq_similarity')
subst = addGapScores(binarySubst, binGapScores)
"""Distance between seq1 and seq2 using the substitution matrix subst"""
sim12 = np.array([i for i in map(lambda a, b: subst.get((a, b), subst.get((b, a))), seq1, seq2)])
if normed:
siteN = np.sum(~np.isnan(sim12))
sim11 = seq_similarity_old(seq1, seq1, subst=subst, normed=False)/siteN
sim22 = seq_similarity_old(seq2, seq2, subst=subst, normed=False)/siteN
sim12 = np.nansum(2*sim12/(sim11+sim22))
else:
sim12 = np.nansum(sim12)
return sim12
def seq_distance(seq1, seq2, subst = None, normed = True):
"""Compare two sequences and return the distance from one to the other
If the seqs are of different length then it raises an exception
Returns a scalar [0, siteN] where siteN ignores nan similarities which may depend on gaps
Optionally returns normed = True distance:
[0, 1]
Note that either way the distance is "normed", its either per site (True) or total normed (False):
[0, siteN]"""
return seq_similarity(seq1, seq2, subst = subst, normed = normed, asDistance = True)
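# Illustrative sketch: with the binary substitution matrix the normed distance is the
# per-site mismatch fraction (one mismatch over four sites here).
def _example_seq_distance():
    su = addGapScores(binarySubst, binGapScores)
    return seq_distance('AKAA', 'AKKA', subst=su, normed=True)  # -> 0.25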
def unalign_similarity(seq1,seq2,subst=None):
"""Compare two sequences by aligning them first with pairwise alignment
and return the distance from one to the other"""
if subst is None:
subst = blosum90
res = pairwise2.align.globaldx(seq1, seq2, subst)
return res[0][2]
def _test_seq_similarity(subst=None,normed=True):
def test_one(s, sname, n, seq1, seq2):
print(seq1)
print(seq2)
try:
sim = seq_similarity_old(seq1, seq2, subst=s, normed=n)
print('Similarity: %f' % sim)
except:
print('Similarity: %s [%s]' % (sys.exc_info()[0], sys.exc_info()[1]))
#dist = seq_distance(seq1,seq2,subst=s)
try:
dist = seq_distance(seq1, seq2, subst=s)
print('Distance: %f' % dist)
except:
print('Distance: %s [%s]' % (sys.exc_info()[0], sys.exc_info()[1]))
print()
seqs = ['AAAA',
'AAAA',
'KKKK',
'AAKA',
'-AAA',
'-A-A']
if subst is None:
subst = [addGapScores(binarySubst, binGapScores),
addGapScores(binarySubst, nanZeroGapScores),
addGapScores(blosum90, blosum90GapScores),
addGapScores(blosum90, nanGapScores)]
names = ['addGapScores(binarySubst,binGapScores)',
'addGapScores(binarySubst,nanZeroGapScores)',
'addGapScores(blosum90,blosum90GapScores)',
'addGapScores(blosum90,nanGapScores)']
for s, sname in zip(subst, names):
print('Using %s normed = %s' % (sname, normed))
for seq1, seq2 in itertools.combinations(seqs, 2):
test_one(s, sname, normed, seq1, seq2)
else:
for seq1, seq2 in itertools.combinations(seqs, 2):
test_one(subst, 'supplied subst', normed, seq1, seq2)
def calcDistanceMatrix(seqs,normalize=False,symetric=True,metric=None,**kwargs):
"""Returns a square distance matrix with rows and columns of the unique sequences in seqs
By default will normalize by subtracting off the min() to at least get rid of negative distances
However, I don't really think this is the best option.
If symetric is True then only calculates dist[i,j] and assumes dist[j,i] == dist[i,j]
Additional kwargs are passed to the distanceFunc (e.g. subst, gapScores, normed)
Parameters
----------
seqs : list/iterator
Genetic sequences to compare.
normalize : bool
If true (default: False), subtracts off dist.min() to eliminate negative distances
(Could be improved/expanded)
symetric : bool
If True (default), then it assumes dist(A,B) == dist(B,A) and speeds up computation.
metric : function with params seq1, seq2 and possibly additional kwargs
Function will be called to compute each pairwise distance.
kwargs : additional keyword arguments
Will be passed to each call of metric()
Returns
-------
dist : ndarray of shape [len(seqs), len(seqs)]
Contains all pairwise distances for seqs.
"""
return calcDistanceRectangle_old(seqs, seqs, normalize=normalize, symetric=symetric, metric=metric, **kwargs)
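# Illustrative sketch: a small symmetric pairwise distance matrix over aligned 4-mers,
# using seq_distance with a gap-aware binary substitution matrix. The _example_* helper
# is illustrative only.
def _example_distance_matrix():
    seqs = ['AKAA', 'AKKA', 'KKKK']
    su = addGapScores(binarySubst, binGapScores)
    return calcDistanceMatrix(seqs, metric=seq_distance, subst=su, normed=False)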
def calcDistanceRectangle_old(row_seqs,col_seqs,normalize=False,symetric=False,metric=None,convertToNP=False,**kwargs):
"""Returns a rectangular distance matrix with rows and columns of the unique sequences in row_seqs and col_seqs
By default will normalize by subtracting off the min() to at least get rid of negative distances
However, I don't really think this is the best option.
If symetric is True then only calculates dist[i,j] and assumes dist[j,i] == dist[i,j]
Additional kwargs are passed to the distanceFunc (e.g. subst, gapScores, normed)
Parameters
----------
row_seqs : list/iterator
Genetic sequences to compare.
col_seqs : list/iterator
Genetic sequences to compare.
normalize : bool
If true (default: False), subtracts off dist.min() to eliminate negative distances
(Could be improved/expanded)
symetric : bool
If True (default), then it assumes dist(A,B) == dist(B,A) and speeds up computation.
metric : function with params seq1, seq2 and possibly additional kwargs
Function will be called to compute each pairwise distance.
convertToNP : bool (default: False)
If True then strings are converted to np.arrays for speed,
        but metric will also need to accommodate the arrays as opposed to strings
kwargs : additional keyword arguments
Will be passed to each call of metric()
Returns
-------
dist : ndarray of shape [len(row_seqs), len(col_seqs)]
Contains all pairwise distances for seqs.
"""
if not 'normed' in list(kwargs.keys()):
kwargs['normed'] = False
if metric is None:
metric = seq_distance
"""Only compute distances on unique sequences. De-uniquify with inv_uniqi later"""
row_uSeqs, row_uniqi, row_inv_uniqi = np.unique(row_seqs, return_index=True, return_inverse=True)
col_uSeqs, col_uniqi, col_inv_uniqi = np.unique(col_seqs, return_index=True, return_inverse=True)
if convertToNP:
R = [seq2vec(s) for s in row_uSeqs]
C = [seq2vec(s) for s in col_uSeqs]
else:
R = row_uSeqs
C = col_uSeqs
dist = np.zeros((len(row_uSeqs), len(col_uSeqs)))
for i, j in itertools.product(list(range(len(row_uSeqs))), list(range(len(col_uSeqs)))):
if not symetric:
"""If not assumed symetric, compute all distances"""
dist[i, j] = metric(R[i], C[j], **kwargs)
else:
if j<i:
tmp = metric(R[i], C[j], **kwargs)
dist[i, j] = tmp
dist[j, i] = tmp
elif j>i:
pass
elif j==i:
dist[i, j] = metric(R[i], C[j], **kwargs)
if normalize:
dist = dist - dist.min()
"""De-uniquify such that dist is now shape [len(seqs), len(seqs)]"""
dist = dist[row_inv_uniqi,:][:, col_inv_uniqi]
return dist
def calcDistanceRectangle(row_seqs, col_seqs, subst=None, nb_metric=None, normalize=False, symetric=False):
"""Returns a rectangular distance matrix with rows and columns of the unique sequences in row_seqs and col_seqs
By default will normalize by subtracting off the min() to at least get rid of negative distances
However, I don't really think this is the best option.
If symetric is True then only calculates dist[i,j] and assumes dist[j,i] == dist[i,j]
TODO:
    (1) Wrap this function around dist rect functions below.
(2) Define a coverage nb_metric
(3) Come up with a back-up plan for when numba import fails...
(Not jit'ing is not a good option because it will be super slow!
There need to be numpy equivalent functions as back-up...)
    Note: unlike calcDistanceRectangle_old, additional metric kwargs are not passed through.
Parameters
----------
row_seqs : list/iterator
Genetic sequences to compare.
col_seqs : list/iterator
Genetic sequences to compare.
subst : dict or ndarray
Similarity matrix for use by the metric. Can be subst or substMat (i.e. dict or ndarray)
nb_metric : numba jit'd function with params seq_vecs1, seq_vecs2 (int8) and substMat (and others?)
Function will be called to compute each pairwise distance.
normalize : bool
If true (default: False), subtracts off dist.min() to eliminate negative distances
(Could be improved/expanded)
symetric : bool
If True (default), then it assumes dist(A,B) == dist(B,A) and speeds up computation.
Returns
-------
dist : ndarray of shape [len(row_seqs), len(col_seqs)]
Contains all pairwise distances for seqs.
"""
    if subst is None:
        substMat = subst2mat(addGapScores(binarySubst, binGapScores))
    elif isinstance(subst, dict):
        substMat = subst2mat(subst)
    else:
        substMat = subst
    if nb_metric is None:
        nb_metric = nb_seq_similarity
    """Only compute distances on unique sequences. De-uniquify with inv_uniqi later"""
    row_uSeqs, row_uniqi, row_inv_uniqi = np.unique(row_seqs, return_index=True, return_inverse=True)
    col_uSeqs, col_uniqi, col_inv_uniqi = np.unique(col_seqs, return_index=True, return_inverse=True)
    row_vecs = seqs2mat(row_uSeqs)
    col_vecs = seqs2mat(col_uSeqs)
    dist = distRect(row_vecs, col_vecs, substMat, nb_metric, normalize=normalize, symetric=symetric)
    """De-uniquify such that dist is now shape [len(row_seqs), len(col_seqs)]"""
    dist = dist[row_inv_uniqi,:][:, col_inv_uniqi]
    return dist
def distRect_factory(nb_metric):
"""Can be passed a numba jit'd distance function and
will return a jit'd function for computing all pairwise distances using that function"""
@nb.jit(nb.boolean(nb.float64[:,:], nb.int8[:,:], nb.int8[:,:], nb.float64[:,:], nb.boolean), nopython=True)
def nb_distRect(pwdist, rows, cols, substMat, symetric):
n = rows.shape[0]
m = cols.shape[0]
for i in range(n):
for j in range(m):
if not symetric:
pwdist[i, j] = nb_seq_similarity(rows[i,:], cols[j,:], substMat=substMat, normed=False, asDistance=True)
else:
if j<=i:
pwdist[i, j] = nb_seq_similarity(rows[i,:], cols[j,:], substMat=substMat, normed=False, asDistance=True)
pwdist[j, i] = pwdist[i, j]
return True
return nb_distRect
def distRect(row_vecs, col_vecs, substMat, nb_metric, normalize=False, symetric=False):
"""These conversion will go in a wrapper function with the uniquing business
if subst is None:
substMat = subst2mat(addGapScores(binarySubst,binGapScores))
else:
substMat = subst2mat(subst)
if nb_metric is None:
nb_metric = nb_seq_similarity
row_vecs = seqs2mat(row_seqs)
col_vecs = seqs2mat(col_seqs)"""
nb_drect = distRect_factory(nb_metric)
pwdist = np.zeros((row_vecs.shape[0], col_vecs.shape[0]), dtype=np.float64)
success = nb_drect(pwdist, row_vecs, col_vecs, substMat, symetric)
assert success
if normalize:
pwdist = pwdist - pwdist.min()
return pwdist
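# Illustrative sketch: distRect consumes the int8 matrices from seqs2mat plus a numeric
# substitution matrix such as binaryMat; whether the jit'd kernel compiles depends on the
# installed numba version. The _example_* helper is illustrative only.
def _example_distrect():
    vecs = seqs2mat(['AKAA', 'AKKA', 'KKKK'])
    return distRect(vecs, vecs, binaryMat, nb_seq_similarity, symetric=True)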
#@jit()
def coverageDistance(epitope,peptide, mmTolerance = 1,**kwargs):
"""Determines whether pepitde covers epitope
and can handle epitopes and peptides of different lengths.
To be a consistent distance matrix:
covered = 0
not-covered = 1
If epitope is longer than peptide it is not covered.
Otherwise coverage is determined based on a mmTolerance
    Can accommodate strings or np.arrays (but not a mix).
Parameters
----------
epitope : str or np.array
peptide : str or np.array
mmTolerance : int
Number of mismatches tolerated
If dist <= mmTolerance then it is covered
Returns
-------
covered : int
Covered (0) or not-covered (1)"""
tEpitope, tPeptide = type(epitope), type(peptide)
assert tEpitope == tPeptide
LEpitope, LPeptide = len(epitope), len(peptide)
if LEpitope > LPeptide:
return 1
if isinstance(epitope, str):
        min_dist = np.array([np.sum([i for i in map(operator.__ne__, epitope, peptide[starti:starti+LEpitope])]) for starti in range(LPeptide-LEpitope+1)]).min()
    else:
        min_dist = np.array([(epitope != peptide[starti:starti+LEpitope]).sum() for starti in range(LPeptide-LEpitope+1)]).min()
return 0 if min_dist <= mmTolerance else 1
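# Illustrative sketch: an epitope is "covered" (0) when some window of the peptide matches
# it with at most mmTolerance mismatches, otherwise the distance is 1.
def _example_coverage():
    covered = coverageDistance('AKAA', 'KAKAAK', mmTolerance=1)      # exact hit at offset 1 -> 0
    not_covered = coverageDistance('AKAA', 'KCCCCK', mmTolerance=1)  # no close window -> 1
    return covered, not_covered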
def embedDistanceMatrix(dist,method='tsne'):
"""MDS embedding of sequence distances in dist, returning Nx2 x,y-coords: tsne, isomap, pca, mds, kpca"""
if method == 'tsne':
xy = tsne.run_tsne(dist, no_dims=2)
#xy=pytsne.run_tsne(adist,no_dims=2)
elif method == 'isomap':
isoObj = Isomap(n_neighbors=10, n_components=2)
xy = isoObj.fit_transform(dist)
elif method == 'mds':
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=15,
dissimilarity="precomputed", n_jobs=1)
xy = mds.fit(dist).embedding_
rot = PCA(n_components=2)
xy = rot.fit_transform(xy)
elif method == 'pca':
pcaObj = PCA(n_components=2)
xy = pcaObj.fit_transform(1-dist)
elif method == 'kpca':
pcaObj = KernelPCA(n_components=2, kernel='precomputed')
xy = pcaObj.fit_transform(1-dist)
elif method == 'lle':
lle = manifold.LocallyLinearEmbedding(n_neighbors=30, n_components=2, method='standard')
xy = lle.fit_transform(dist)
return xy | mit |
tesslerc/H-DRLN | graying_the_box/hand_crafted_features/label_states_breakout.py | 1 | 3594 | import numpy as np
import matplotlib.pyplot as plt
def label_states(states, screens, termination_mat, debug_mode, num_lives):
im_size = np.sqrt(states.shape[1])
states = np.reshape(states, (states.shape[0], im_size, im_size)).astype('int16')
screens = np.reshape(np.transpose(screens), (3,210,160,-1))
screens = np.transpose(screens,(3,1,2,0))
# masks
ball_mask = np.ones_like(screens[0])
ball_mask[189:] = 0
ball_mask[57:63] = 0
ball_x_ = 80
ball_y_ = 105
td_mask = np.ones_like(screens[0])
td_mask[189:] = 0
td_mask[:25] = 0
features = {
'ball_pos': [[-1,-1],[-1,-1]],
'ball_dir': [-1,-1],
'racket': [-1,-1],
'missing_bricks': [0,0],
'hole': [0,0],
'traj': [0,0],
'time': [0,0]
}
if debug_mode:
fig1 = plt.figure('screens')
ax1 = fig1.add_subplot(111)
screen_plt = ax1.imshow(screens[0], interpolation='none')
plt.ion()
plt.show()
traj_id = 0
time = 0
strike_counter = 0
s_ = screens[1]
for i,s in enumerate(screens[2:]):
#0. TD
tdiff = (s - s_) * td_mask
s_ = s
row_ind, col_ind = np.nonzero(tdiff[:,:,0])
ball_y = np.mean(row_ind)
ball_x = np.mean(col_ind)
# #1. ball location
red_ch = s[:,:,0]
# is_red = 255 * (red_ch == 200)
# ball_filtered = np.zeros_like(s)
# ball_filtered[:,:,0] = is_red
# ball_filtered = ball_mask * ball_filtered
#
# row_ind, col_ind = np.nonzero(ball_filtered[:,:,0])
#
# ball_y = np.mean(row_ind)
# ball_x = np.mean(col_ind)
#2. ball direction
ball_dir = 0 * (ball_x >= ball_x_ and ball_y >= ball_y_) +\
1 * (ball_x >= ball_x_ and ball_y < ball_y_) +\
2 * (ball_x < ball_x_ and ball_y >= ball_y_) +\
3 * (ball_x < ball_x_ and ball_y < ball_y_)
ball_x_ = ball_x
ball_y_ = ball_y
#3. racket position
is_red = 255 * (red_ch[190,8:-8] == 200)
racket_x = np.mean(np.nonzero(is_red)) + 8
#4. number of bricks
z = red_ch[57:92,8:-8].flatten()
is_brick = np.sum(1*(z>0) + 0*(z==0))
missing_bricks = (len(z) - is_brick)/40.
#5. holes
brick_strip = red_ch[57:92,8:-8]
brick_row_sum = brick_strip.sum(axis=0)
has_hole = np.any((brick_row_sum==0))
#6. traj_id
if termination_mat[i] > 0:
strike_counter+=1
if strike_counter%num_lives==0:
traj_id += 1
time = 0
time += 1
if debug_mode:
screen_plt.set_data(s)
            buf_line = ('Example %d: ball pos (x,y): (%0.2f, %0.2f), ball direct: %d, racket pos: (%0.2f), number of missing bricks: %d, has a hole: %d, traj id: %d, time: %d, st_cnt: %d') % \
(i, ball_x, ball_y, ball_dir, racket_x, missing_bricks, has_hole, traj_id, time, strike_counter)
            print(buf_line)
plt.pause(0.001)
# labels[i] = (ball_x, ball_y, ball_dir, racket_x, missing_bricks, has_hole, traj_id, time)
features['ball_pos'].append([ball_x, ball_y])
features['ball_dir'].append(ball_dir)
features['racket'].append(racket_x)
features['missing_bricks'].append(missing_bricks)
features['hole'].append(has_hole)
features['traj'].append(traj_id)
features['time'].append(time)
features['n_trajs'] = traj_id
return features | mit |
thaihungle/deepexp | gen-dnc/mimic_task/report.py | 1 | 8043 | import pandas as pd
import os
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
def report_oe(path='./report/odd_even/loss'):
graph={}
for fname in os.listdir(path):
if 'previous' in fname:
continue
ffname = os.path.join(path, fname)
df = pd.read_csv(ffname,
header=0,
usecols=["Step", "Value"],
)
graph[fname]={'x':[],'y':[]}
for index, row in df.iterrows():
# print(row['Step'], row['Value'])
graph[fname]['x'].append(row['Step'])
graph[fname]['y'].append(row['Value'])
if index>=50:
break
dashList = [(5, 2), (2, 5), (4, 10), (3, 3, 2, 2), (5, 2, 20, 2)]
# List of Dash styles, each as integers in the format: (first line length, first space length, second line length, second space length...)
plt.xlabel('Step')
plt.ylabel("Loss") # add a label to the y axis
plots=[]
pnames=[]
c=0
linestyles = ['-', '--',':','-.']
markers = ['+','^', '*']
for k,v in sorted(graph.items()):
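        # Smooth each curve with an 11-point moving average and plot its first 41 points.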
if c<len(linestyles):
print(len(v['x']))
print(len(v['y']))
plots.append(plt.plot(v['x'][:41], np.convolve(v['y'],np.ones(11)/11,mode='valid')[:41],
linestyle=linestyles[c%len(linestyles)]))
else:
plots.append(plt.plot(v['x'][:41], np.convolve(v['y'], np.ones(11) / 11, mode='valid')[:41],
marker=markers[c % len(markers)]))
pnames.append(k[:-4])
c+=1
plt.legend(pnames,shadow=True, fancybox=True, loc='lower left')
plt.show()
def report_odd_even_read_mode():
dnc=np.asarray([[0.07,0.64], [0.71,0.11], [0.24, 0.31]])
dnc_wp = np.asarray([(0.7, 0.44), (0.14, 0.45), (0.14, 0.1)])
dnc_de = np.asarray([(0.48, 0.87), (0.49, 0.12), (0.01, 0)])
dc_dnc = np.asarray([(0.57, 0.8), (0.31, 0.19), (0.1, 0)])
dcw_dnc = np.asarray([(0.61, 0.48), (0.01, 0.51), (0.37, 0)])
ld = [dnc, dnc_wp, dnc_de, dc_dnc, dcw_dnc]
name=['DNC', 'DNC (write-protected)','DNC (use previous output)','DC-MANN', 'DCw-MANN']
fig, axes = plt.subplots(nrows=1, ncols=5)
for i,d in enumerate(ld):
ind = np.arange(3)
width = 0.35
ax=axes[i]
print(i)
rects1 = ax.bar(ind, d[:,0], width, color='r')
rects2 = ax.bar(ind + width, d[:,1], width, color='y')
ax.set_ylim(0,1)
ax.set_ylabel('Weight')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(('Backward', 'Content', 'Forward'))
ax.legend((rects1[0], rects2[0]), ('Encode', 'Decode'))
ax.set_title(name[i])
plt.show()
def report_odd_even_read_mode2():
dnc=np.asarray([ [0.24, 0.25], [0.71,0.11],[0.05,0.64]])
dnc_wp = np.asarray([(0.15, 0.1), (0.15, 0.45), (0.7, 0.45)])
# dnc_de = np.asarray([(0.49, 0.87), (0.49, 0.13), (0.02, 0)])
dc_dnc = np.asarray([(0.12, 0),(0.31, 0.2), (0.57, 0.8)])
dcw_dnc = np.asarray([(0.37, 0), (0.02, 0.52), (0.61, 0.48)])
ld = [dnc, dnc_wp, dc_dnc, dcw_dnc]
ld=np.asarray(ld)
print(ld.shape)
name=['DNC','DNC (write-protected)','DC-MANN', 'DCw-MANN']
fig, ax = plt.subplots()
ind = np.arange(len(ld))
width = 0.35
erects1 = ax.bar(ind, ld[:,0,0], width, color='r')
erects2 = ax.bar(ind, ld[:,1,0], width, color='y', bottom=ld[:,0,0])
erects3 = ax.bar(ind , ld[:,2,0], width, color='b', bottom=ld[:,0,0]+ld[:,1,0])
drects1 = ax.bar(ind+width+0.1/2, ld[:,0,1], width, color='pink')
drects2 = ax.bar(ind+width+0.1/2, ld[:,1,1], width, color='orange',bottom=ld[:,0,1])
drects3 = ax.bar(ind+width+0.1/2, ld[:,2,1], width, color='black',bottom=ld[:,0,1]+ld[:,1,1])
ax.legend((erects1[0], erects2[0], erects3[0],drects1[0], drects2[0], drects3[0]),
('Encoder\'s Backward Read', 'Encoder\'s Content Read', 'Encoder\'s Forward Read',
'Decoder\'s Backward Read', 'Decoder\'s Content Read', 'Decoder\'s Forward Read'),
loc='upper center', bbox_to_anchor=(0.5,1.4), fancybox=True, ncol=2)
plt.tight_layout(rect=[0,0,1,0.7])
ax.set_ylim(0,1)
ax.set_ylabel('Weight')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(name)
# ax.legend((rects1[0], rects2[0]), ('Encode', 'Decode'))
# ax.set_title(name[i])
plt.show()
def report_drug_read_mode2():
dnc=np.asarray([[0.32,0.03], [0.14,0.48], [0.54, 0.49]])
#dnc_wp = np.asarray([(0.7, 0.45), (0.15, 0.45), (0.15, 0.1)])
# dnc_de = np.asarray([(0.49, 0.87), (0.49, 0.13), (0.02, 0)])
dc_dnc = np.asarray([(0.48, 0.02), (0.38, 0.56), (0.14, 0.42)])
dcw_dnc = np.asarray([(0.05, 0.1), (0.64, 0.37), (0.31, 0.53)])
ld = [dnc, dc_dnc, dcw_dnc]
ld=np.asarray(ld)
print(ld.shape)
name=['DNC','DC-MANN', 'DCw-MANN']
fig, ax = plt.subplots()
ind = np.arange(len(ld))
width = 0.35
erects1 = ax.bar(ind, ld[:,0,0], width, color='r')
erects2 = ax.bar(ind, ld[:,1,0], width, color='y', bottom=ld[:,0,0])
erects3 = ax.bar(ind , ld[:,2,0], width, color='b', bottom=ld[:,0,0]+ld[:,1,0])
drects1 = ax.bar(ind+width+0.1/2, ld[:,0,1], width, color='pink')
drects2 = ax.bar(ind+width+0.1/2, ld[:,1,1], width, color='orange',bottom=ld[:,0,1])
drects3 = ax.bar(ind+width+0.1/2, ld[:,2,1], width, color='black',bottom=ld[:,0,1]+ld[:,1,1])
ax.legend((erects1[0], erects2[0], erects3[0],drects1[0], drects2[0], drects3[0]),
('Encoder\'s Backward Read', 'Encoder\'s Content-Based Read', 'Encoder\'s Forward Read',
'Decoder\'s Backward Read', 'Decoder\'s Content-Based Read', 'Decoder\'s Forward Read'), loc='lower center')
ax.set_ylim(0,1)
ax.set_ylabel('Weight of modes')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(name)
# ax.legend((rects1[0], rects2[0]), ('Encode', 'Decode'))
# ax.set_title(name[i])
plt.show()
def report_drug(path='./data/report/drug_mem_train_loss/', limit=23):
graph = {}
for fname in os.listdir(path):
ffname = os.path.join(path, fname)
df = pd.read_csv(ffname,
header=0,
usecols=["Step", "Value"],
)
graph[fname] = {'x': [], 'y': []}
for index, row in df.iterrows():
if index>limit:
break
# print(row['Step'], row['Value'])
graph[fname]['x'].append(row['Step'])
graph[fname]['y'].append(row['Value'])
dashList = [(5, 2), (2, 5), (4, 10), (3, 3, 2, 2), (5, 2, 20, 2)]
# List of Dash styles, each as integers in the format: (first line length, first space length, second line length, second space length...)
plt.xlabel('Step')
plt.ylabel("Training Loss") # add a label to the y axis
plots = []
pnames = []
c = 0
linestyles = ['-', '--', ':', '-.']
for k, v in sorted(graph.items()):
plots.append(plt.plot(v['x'], v['y'], linestyle=linestyles[c%(len(linestyles))]))
pnames.append(k[:-4])
c += 1
plt.legend(pnames, shadow=True, fancybox=True)
plt.show()
import pickle
from nltk.collocations import BigramCollocationFinder
import nltk
def view_ngram_proc():
dig_list = pickle.load(open('./data/seq2seq2/dig_records2.pkl', 'rb'))
# pro_list = pickle.load(open('./data/seq2seq2/pro_records2.pkl', 'rb'))
flat_list = [item for sublist in dig_list for item in sublist]
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(flat_list)
print(finder.nbest(bigram_measures.pmi, 100))
if __name__ == '__main__':
# report_oe(path='./data/report/odd-even-edit')
report_oe(path='./data/report/odd-even-loss-long')
# report_drug(path='./data/report/drug_mem_train_loss')
# report_drug(path='./data/report/drug_test_loss')
#report_drug_read_mode2()
# report_odd_even_read_mode2()
# view_ngram_proc() | mit |
ihmeuw/vivarium | src/vivarium/framework/state_machine.py | 1 | 16689 | """
=============
State Machine
=============
A state machine implementation for use in ``vivarium`` simulations.
"""
from enum import Enum
from typing import Callable, List, Iterable, Tuple, TYPE_CHECKING
import pandas as pd
import numpy as np
if TYPE_CHECKING:
from vivarium.framework.engine import Builder
from vivarium.framework.population import PopulationView
from vivarium.framework.time import Time
def _next_state(index: pd.Index,
event_time: 'Time',
transition_set: 'TransitionSet',
population_view: 'PopulationView') -> None:
"""Moves a population between different states using information from a `TransitionSet`.
Parameters
----------
index
An iterable of integer labels for the simulants.
event_time
When this transition is occurring.
transition_set
A set of potential transitions available to the simulants.
population_view
A view of the internal state of the simulation.
"""
if len(transition_set) == 0 or index.empty:
return
outputs, decisions = transition_set.choose_new_state(index)
groups = _groupby_new_state(index, outputs, decisions)
if groups:
for output, affected_index in sorted(groups, key=lambda x: str(x[0])):
if output == 'null_transition':
pass
elif isinstance(output, Transient):
if not isinstance(output, State):
raise ValueError('Invalid transition output: {}'.format(output))
output.transition_effect(affected_index, event_time, population_view)
output.next_state(affected_index, event_time, population_view)
elif isinstance(output, State):
output.transition_effect(affected_index, event_time, population_view)
else:
raise ValueError('Invalid transition output: {}'.format(output))
def _groupby_new_state(index: pd.Index, outputs: List, decisions: pd.Series) -> List[Tuple[str, pd.Index]]:
"""Groups the simulants in the index by their new output state.
Parameters
----------
index
An iterable of integer labels for the simulants.
outputs
A list of possible output states.
decisions
A series containing the name of the next state for each simulant in the
index.
Returns
-------
    List[Tuple[str, pandas.Index]]
The first item in each tuple is the name of an output state and the
second item is a `pandas.Index` representing the simulants to transition
into that state.
"""
output_map = {o: i for i, o in enumerate(outputs)}
groups = pd.Series(index).groupby([output_map[d] for d in decisions])
results = [(outputs[i], pd.Index(sub_group.values)) for i, sub_group in groups]
selected_outputs = [o for o, _ in results]
for output in outputs:
if output not in selected_outputs:
results.append((output, pd.Index([])))
return results
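# Illustrative example (hypothetical values, with plain strings standing in for
# the output states): given index=pd.Index([10, 11, 12]),
# outputs=['sick', 'healthy'] and decisions=pd.Series(['sick', 'healthy', 'sick']),
# the function returns [('sick', Index([10, 12])), ('healthy', Index([11]))].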
class Trigger(Enum):
NOT_TRIGGERED = 0
START_INACTIVE = 1
START_ACTIVE = 2
def _process_trigger(trigger):
if trigger == Trigger.NOT_TRIGGERED:
return None, False
elif trigger == Trigger.START_INACTIVE:
return pd.Index([]), False
elif trigger == Trigger.START_ACTIVE:
return pd.Index([]), True
else:
raise ValueError("Invalid trigger state provided: {}".format(trigger))
class Transition:
"""A process by which an entity might change into a particular state.
Parameters
----------
input_state
The start state of the entity that undergoes the transition.
output_state
The end state of the entity that undergoes the transition.
probability_func
A method or function that describing the probability of this
transition occurring.
"""
def __init__(self,
input_state: 'State',
output_state: 'State',
probability_func: Callable[[pd.Index], pd.Series] = lambda index: pd.Series(1, index=index),
triggered=Trigger.NOT_TRIGGERED):
self.input_state = input_state
self.output_state = output_state
self._probability = probability_func
self._active_index, self.start_active = _process_trigger(triggered)
@property
def name(self) -> str:
transition_type = self.__class__.__name__.lower()
return f"{transition_type}.{self.input_state.name}.{self.output_state.name}"
def setup(self, builder: 'Builder') -> None:
pass
def set_active(self, index: pd.Index) -> None:
if self._active_index is None:
raise ValueError("This transition is not triggered. An active index cannot be set or modified.")
else:
self._active_index = self._active_index.union(pd.Index(index))
def set_inactive(self, index: pd.Index) -> None:
if self._active_index is None:
raise ValueError("This transition is not triggered. An active index cannot be set or modified.")
else:
self._active_index = self._active_index.difference(pd.Index(index))
def probability(self, index: pd.Index) -> pd.Series:
if self._active_index is None:
return self._probability(index)
index = pd.Index(index)
activated_index = self._active_index.intersection(index)
null_index = index.difference(self._active_index)
activated = pd.Series(self._probability(activated_index), index=activated_index)
null = pd.Series(np.zeros(len(null_index), dtype=float), index=null_index)
return activated.append(null)
def __repr__(self):
c = self.__class__.__name__
return f'{c}({self.input_state}, {self.output_state})'
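# Minimal usage sketch (assumes two State instances named `healthy` and `sick`,
# built from the State class defined below); the transition starts gated and is
# switched on for a chosen sub-population via set_active:
#
#     recovery = Transition(sick, healthy,
#                           probability_func=lambda index: pd.Series(0.1, index=index),
#                           triggered=Trigger.START_INACTIVE)
#     recovery.set_active(pd.Index([0, 1, 2]))  # only these simulants may recover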
class State:
"""An abstract representation of a particular position in a state space.
Attributes
----------
state_id
The name of this state. This should be unique
transition_set
A container for potential transitions out of this state.
"""
def __init__(self, state_id: str):
self.state_id = state_id
self.transition_set = TransitionSet(self.name)
self._model = None
self._sub_components = [self.transition_set]
@property
def name(self) -> str:
state_type = self.__class__.__name__.lower()
return f"{state_type}.{self.state_id}"
@property
def sub_components(self) -> List:
return self._sub_components
def setup(self, builder: 'Builder') -> None:
pass
def next_state(self, index: pd.Index, event_time: 'Time', population_view: 'PopulationView') -> None:
"""Moves a population between different states.
Parameters
----------
index
An iterable of integer labels for the simulants.
event_time
When this transition is occurring.
population_view
A view of the internal state of the simulation.
"""
return _next_state(index, event_time, self.transition_set, population_view)
def transition_effect(self, index: pd.Index, event_time: 'Time', population_view: 'PopulationView') -> None:
"""Updates the simulation state and triggers any side-effects associated with entering this state.
Parameters
----------
index
An iterable of integer labels for the simulants.
event_time
The time at which this transition occurs.
population_view
A view of the internal state of the simulation.
"""
population_view.update(pd.Series(self.state_id, index=index))
self._transition_side_effect(index, event_time)
def cleanup_effect(self, index: pd.Index, event_time: 'Time') -> None:
self._cleanup_effect(index, event_time)
def add_transition(self, output: 'State',
probability_func: Callable[[pd.Index], pd.Series] = lambda index: pd.Series(1.0, index=index),
triggered=Trigger.NOT_TRIGGERED) -> Transition:
"""Builds a transition from this state to the given state.
Parameters
----------
output
The end state after the transition.
Returns
-------
Transition
The created transition object.
"""
t = Transition(self, output, probability_func=probability_func, triggered=triggered)
self.transition_set.append(t)
return t
def allow_self_transitions(self) -> None:
self.transition_set.allow_null_transition = True
def _transition_side_effect(self, index: pd.Index, event_time: 'Time') -> None:
pass
def _cleanup_effect(self, index: pd.Index, event_time: 'Time') -> None:
pass
def __repr__(self):
c = self.__class__.__name__
return f'{c}({self.state_id})'
class Transient:
"""Used to tell _next_state to transition a second time."""
pass
class TransientState(State, Transient):
def __repr__(self):
return f'TransientState({self.state_id})'
class TransitionSet:
"""A container for state machine transitions.
Parameters
----------
state_name
The unique name of the state that instantiated this TransitionSet. Typically
a string but any object implementing __str__ will do.
iterable
Any iterable whose elements are `Transition` objects.
    allow_null_transition
        Whether to allow a null transition, i.e. a transition back into the
        current state, when the given transition probabilities do not sum to one.
"""
def __init__(self, state_name: str, *transitions: Transition, allow_null_transition: bool = False):
self._state_name = state_name
self.allow_null_transition = allow_null_transition
self.transitions = []
self._sub_components = self.transitions
self.extend(transitions)
@property
def name(self) -> str:
return f'transition_set.{self._state_name}'
@property
def sub_components(self) -> List:
return self._sub_components
def setup(self, builder: 'Builder') -> None:
"""Performs this component's simulation setup and return sub-components.
Parameters
----------
builder
Interface to several simulation tools including access to common random
number generation, in particular.
"""
self.random = builder.randomness.get_stream(self.name)
def choose_new_state(self, index: pd.Index) -> Tuple[List, pd.Series]:
"""Chooses a new state for each simulant in the index.
Parameters
----------
index
An iterable of integer labels for the simulants.
Returns
-------
List
The possible end states of this set of transitions.
pandas.Series
A series containing the name of the next state for each simulant
in the index.
"""
outputs, probabilities = zip(*[(transition.output_state, np.array(transition.probability(index)))
for transition in self.transitions])
probabilities = np.transpose(probabilities)
outputs, probabilities = self._normalize_probabilities(outputs, probabilities)
return outputs, self.random.choice(index, outputs, probabilities)
def _normalize_probabilities(self, outputs, probabilities):
"""Normalize probabilities to sum to 1 and add a null transition.
Parameters
----------
outputs
List of possible end states corresponding to this containers
transitions.
probabilities
A set of probability weights whose columns correspond to the end
states in `outputs` and whose rows correspond to each simulant
undergoing the transition.
Returns
-------
List
The original output list expanded to include a null transition (a
transition back to the starting state) if requested.
numpy.ndarray
The original probabilities rescaled to sum to 1 and potentially
expanded to include a null transition weight.
"""
outputs = list(outputs)
# This is mainly for flexibility with the triggered transitions.
# We may have multiple out transitions from a state where one of them
# is gated until some criteria is met. After the criteria is
# met, the gated transition becomes the default (likely as opposed
# to a self transition).
default_transition_count = np.sum(probabilities == 1, axis=1)
if np.any(default_transition_count > 1):
raise ValueError("Multiple transitions specified with probability 1.")
has_default = default_transition_count == 1
total = np.sum(probabilities, axis=1)
probabilities[has_default] /= total[has_default, np.newaxis]
total = np.sum(probabilities, axis=1) # All totals should be ~<= 1 at this point.
if self.allow_null_transition:
if np.any(total > 1+1e-08): # Accommodate rounding errors
raise ValueError(f"Null transition requested with un-normalized "
f"probability weights: {probabilities}")
total[total > 1] = 1 # Correct allowed rounding errors.
probabilities = np.concatenate([probabilities, (1-total)[:, np.newaxis]], axis=1)
outputs.append('null_transition')
else:
if np.any(total == 0):
raise ValueError("No valid transitions for some simulants.")
        else:  # total might be less than one in some places
probabilities /= total[:, np.newaxis]
return outputs, probabilities
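    # Illustrative example of the normalization above (hypothetical weights):
    # with allow_null_transition=True and a probability row [0.2, 0.2], no entry
    # equals 1, the row is left unscaled (total=0.4), and a null transition with
    # weight 0.6 is appended, giving [0.2, 0.2, 0.6]. With
    # allow_null_transition=False and a row [0.2, 0.6], the row is rescaled by
    # its total (0.8) to [0.25, 0.75].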
def append(self, transition: Transition) -> None:
if not isinstance(transition, Transition):
raise TypeError(
'TransitionSet must contain only Transition objects. Check constructor arguments: {}'.format(self))
self.transitions.append(transition)
def extend(self, transitions: Iterable[Transition]) -> None:
for transition in transitions:
self.append(transition)
def __iter__(self):
return iter(self.transitions)
def __len__(self):
return len(self.transitions)
def __repr__(self):
return f"TransitionSet(transitions={[x for x in self.transitions]})"
def __hash__(self):
return hash(id(self))
class Machine:
"""A collection of states and transitions between those states.
Attributes
----------
states
The collection of states represented by this state machine.
state_column
A label for the piece of simulation state governed by this state machine.
population_view
A view of the internal state of the simulation.
"""
def __init__(self, state_column: str, states: Iterable[State] = ()):
self.states = []
self.state_column = state_column
if states:
self.add_states(states)
@property
def name(self) -> str:
machine_type = self.__class__.__name__.lower()
return f"{machine_type}.{self.state_column}"
@property
def sub_components(self):
return self.states
def setup(self, builder: 'Builder') -> None:
self.population_view = builder.population.get_view([self.state_column])
def add_states(self, states: Iterable[State]) -> None:
for state in states:
self.states.append(state)
state._model = self.state_column
def transition(self, index: pd.Index, event_time: 'Time') -> None:
"""Finds the population in each state and moves them to the next state.
Parameters
----------
index
An iterable of integer labels for the simulants.
event_time
The time at which this transition occurs.
"""
for state, affected in self._get_state_pops(index):
if not affected.empty:
state.next_state(affected.index, event_time, self.population_view.subview([self.state_column]))
def cleanup(self, index: pd.Index, event_time: 'Time') -> None:
for state, affected in self._get_state_pops(index):
if not affected.empty:
state.cleanup_effect(affected.index, event_time)
def _get_state_pops(self, index: pd.Index) -> List[Tuple[State, pd.DataFrame]]:
population = self.population_view.get(index)
return [(state, population[population[self.state_column] == state.state_id]) for state in self.states]
def __repr__(self):
return f"Machine(state_column= {self.state_column})"
| gpl-3.0 |
val-iisc/deligan | src/mnist/plot_data.py | 1 | 1775 | # takes data saved by DRAW model and generates animations
# example usage: python plot_data.py noattn /tmp/draw/draw_data.npy
import matplotlib
import sys
import numpy as np
interactive=False # set to False if you want to write images to file
if not interactive:
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend.
import matplotlib.pyplot as plt
def xrecons_grid(X,B,A):
"""
plots canvas for single time step
X is x_recons, (batch_size x img_size)
assumes features = BxA images
batch is assumed to be a square number
"""
padsize=1
padval=.5
ph=B+2*padsize
pw=A+2*padsize
batch_size=X.shape[0]
N=int(np.sqrt(batch_size))
X=X.reshape((N,N,B,A))
img=np.ones((N*ph,N*pw))*padval
for i in range(N):
for j in range(N):
startr=i*ph+padsize
endr=startr+B
startc=j*pw+padsize
endc=startc+A
img[startr:endr,startc:endc]=X[i,j,:,:]
return img
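# For example, with batch_size=100 (so N=10) and 28x28 images (B=A=28), each
# padded tile is 30x30 and the returned canvas has shape (300, 300).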
if __name__ == '__main__':
prefix=sys.argv[1]
out_file=sys.argv[2]
[C,Lxs,Lzs]=np.load(out_file)
T,batch_size,img_size=C.shape
# X=1.0/(1.0+np.exp(-C)) # x_recons=sigmoid(canvas)
X=np.maximum(C,0) #x_recons=relu(canvas)
B=A=int(np.sqrt(img_size))
if interactive:
f,arr=plt.subplots(1,T)
for t in range(T):
img=xrecons_grid(X[t,:,:],B,A)
if interactive:
arr[t].matshow(img,cmap=plt.cm.gray)
arr[t].set_xticks([])
arr[t].set_yticks([])
else:
plt.matshow(img,cmap=plt.cm.gray)
imgname='%s_%d.png' % (prefix,t) # you can merge using imagemagick, i.e. convert -delay 10 -loop 0 *.png mnist.gif
plt.savefig(imgname)
print(imgname)
f=plt.figure()
plt.plot(Lxs,label='Reconstruction Loss Lx')
plt.plot(Lzs,label='Latent Loss Lz')
plt.xlabel('iterations')
plt.legend()
if interactive:
plt.show()
else:
plt.savefig('%s_loss.png' % (prefix))
| mit |
lyebi/Test | test.py | 1 | 2676 | # import numpy as np
# # import gym
# # import universe
# # env=gym.make('MontezumaRevenge-v0')
# # state=env.reset()
# # state=np.uint32(state)
# # import copy
# # import cv2
# #
# # cv2.imshow('img',np.uint8(state))
# # cv2.waitKey()
# #
# #
# #
# # for i in range(1000):
# # a=env.action_space.sample()
# # tmp,_,_,_=env.step(a)
# # tmp=np.uint32(tmp)
# # state+=tmp
# # state1=np.uint32(state/(i+2))
# #
# # # cv2.imshow('sad',state)
# # cv2.imshow('sum',np.uint8(state1))
# # cv2.waitKey()
#
# import itchat
#
# itchat.login()
# friends=itchat.get_friends(update=True)[0:]
#
# male=female = other=0
#
# for i in friends[1:]:
# sex=i["Sex"]
# if sex==1:
# male+=1
# elif sex==2:
# female+=1
# else:
# other+=1
#
# total=len(friends[1:])
#
# print('male:',male/total)
# print('female:',female/total)
#
# def get_var(var):
# variable=[]
# for i in friends:
# value=i[var]
# variable.append(value)
# return variable
#
#
# NickName=get_var("NickName")
# Sex=get_var("Sex")
# Province=get_var('Province')
# City=get_var('City')
# Sig=get_var('Signature')
# from pandas import DataFrame
# data={'Nickname':[NickName],'Sex':[Sex],'Province':[Province],'City':[City],'Signature':[Sig]}
# frame=DataFrame(data)
# frame.to_csv('data.csv',index=True)
#
#
# import re
# siglist=[]
# for i in friends:
# signature=i["Signature"].strip().replace("span","").replace("class","").replace("emoji","")
# rep=re.compile("1f\d+\w*|[<>/=]")
# signature=rep.sub("",signature)
# siglist.append(signature)
# text="".join(siglist)
#
# import jieba
# worldlist=jieba.cut(text,cut_all=True)
# word_space_split=" ".join(worldlist)
#
#
#
# import matplotlib.pyplot as plt
# from wordcloud import WordCloud, ImageColorGenerator
# import PIL.Image as Image
# # coloring = np.array(Image.open("/wechat.jpg"))
# my_wordcloud = WordCloud(background_color="white", max_words=2000,
# max_font_size=60, random_state=42, scale=2,
# ).generate(word_space_split)
#
# # image_colors = ImageColorGenerator(coloring)
# # plt.imshow(my_wordcloud.recolor(color_func=image_colors))
# plt.imshow(my_wordcloud)
# plt.axis("off")
# plt.show()
#
#
#
#
# import gym
# import cv2
import numpy as np
# from pygame.locals import *
# import pygame,sys
# from pynput.mouse import Button,Controller
# from envs import *
#
# env = create_atari_env('MontezumaRevenge-v0')
# state=env.reset()[6:10,20:60]
# cv2.imshow('img',state)
# cv2.waitKey()
#
#
# print(np.shape(state))
import gym
env = gym.make("MontezumaRevenge-v0")
env.reset()
print(env.action_space.n)
| gpl-2.0 |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/sklearn/manifold/tests/test_t_sne.py | 28 | 24487 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_in
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
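    # The perplexity of each conditional distribution P_i is the exponential of
    # its Shannon entropy (in nats), so the mean should match the requested value.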
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 500
desired_perplexity = 25.0
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
P1 = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
# Test that when we use all the neighbors the results are identical
k = n_samples
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2 = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
assert_array_almost_equal(P1, P2, decimal=4)
# Test that the highest P_ij are the same when few neighbors are used
for k in np.linspace(80, n_samples, 10):
k = int(k)
topn = k * 10 # check the top 10 *k entries out of k * k entries
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2k = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
k = 10
n_samples = 100
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
last_P = None
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
3, verbose=0)
P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]
def grad(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
# The Barnes-Hut approximation uses a different method to estimate
# P_ij using only a number of nearest neighbors instead of all
# points (so that k = 3 * perplexity). As a result we set the
# perplexity=5, so that the number of neighbors is 5%.
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(100, n_components).astype(np.float32)
for init in ('random', 'pca'):
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=50,
learning_rate=100.0, init=init, random_state=0,
method=method)
X_embedded = tsne.fit_transform(X)
T = trustworthiness(X, X_embedded, n_neighbors=1)
assert_almost_equal(T, 1.0, decimal=1)
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [200, 250, 300]:
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
n_iter=n_iter, random_state=0)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert_less_equal(kl_divergences[1], kl_divergences[0])
assert_less_equal(kl_divergences[2], kl_divergences[1])
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method='exact')
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
metric="precomputed", random_state=0, verbose=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca', 'random', or numpy array.
m = "'init' must be 'pca', 'random', or a numpy array"
assert_raises_regexp(ValueError, m, TSNE, init="not available")
def test_init_ndarray():
# Initialize TSNE with ndarray and test fit
tsne = TSNE(init=np.zeros((100, 2)))
X_embedded = tsne.fit_transform(np.ones((100, 5)))
assert_array_equal(np.zeros((100, 2)), X_embedded)
def test_init_ndarray_precomputed():
# Initialize TSNE with ndarray and metric 'precomputed'
# Make sure no FutureWarning is thrown from _fit
tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed")
tsne.fit(np.zeros((100, 100)))
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
# Skip num points should make it such that the Barnes_hut gradient
# is not calculated for indices below skip_num_point.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
_barnes_hut_tsne.gradient(pij_input, pos_output, neighbors,
grad_bh, 0.5, 2, 1, skip_num_points=0)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
def test_no_sparse_on_barnes_hut():
# No sparse matrices allowed on Barnes-Hut.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_iter=199, method='barnes_hut')
assert_raises_regexp(TypeError, "A sparse matrix was.*",
tsne.fit_transform, X_csr)
def test_64bit():
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
methods = ['barnes_hut', 'exact']
for method in methods:
for dt in [np.float32, np.float64]:
X = random_state.randn(100, 2).astype(dt)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method)
tsne.fit_transform(X)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
distances = random_state.randn(n_samples, n_features)
distances = distances.astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, False)
kl, gradex = _kl_divergence(params, P, degrees_of_freedom, n_samples,
n_components)
k = n_samples - 1
bt = BallTree(distances)
distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
Pbh = _joint_probabilities_nn(distances, neighbors_nn,
perplexity, False)
kl, gradbh = _kl_divergence_bh(params, Pbh, neighbors_nn,
degrees_of_freedom, n_samples,
n_components, angle=angle,
skip_num_points=0, verbose=False)
assert_array_almost_equal(Pbh, P, decimal=5)
assert_array_almost_equal(gradex, gradbh, decimal=5)
def test_quadtree_similar_point():
# Introduce a point into a quad tree where a similar point already exists.
# Test will hang if it doesn't complete.
Xs = []
# check the case where points are actually different
Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
# check the case where points are the same on X axis
Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
# check the case where points are arbitrarily close on X axis
Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
# check the case where points are the same on Y axis
Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
# check the case where points are arbitrarily close on Y axis
Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
# check the case where points are arbitrarily close on both axes
Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - x axis
Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - y axis
Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
dtype=np.float32))
for X in Xs:
counts = np.zeros(3, dtype='int64')
_barnes_hut_tsne.check_quadtree(X, counts)
m = "Tree consistency failed: unexpected number of points at root node"
assert_equal(counts[0], counts[1], m)
m = "Tree consistency failed: unexpected number of points on the tree"
assert_equal(counts[0], counts[2], m)
def test_index_offset():
# Make sure translating between 1D and N-D indices are preserved
assert_equal(_barnes_hut_tsne.test_index2offset(), 1)
assert_equal(_barnes_hut_tsne.test_index_offset(), 1)
def test_n_iter_without_progress():
# Make sure that the parameter n_iter_without_progress is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
tsne = TSNE(n_iter_without_progress=2, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the value of n_iter_without_progress
assert_in("did not make any progress during the "
"last 2 episodes. Finished.", out)
def test_min_grad_norm():
# Make sure that the parameter min_grad_norm is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
min_grad_norm = 0.002
tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
lines_out = out.split('\n')
# extract the gradient norm from the verbose output
gradient_norm_values = []
for line in lines_out:
# When the computation is Finished just an old gradient norm value
# is repeated that we do not need to store
if 'Finished' in line:
break
start_grad_norm = line.find('gradient norm')
if start_grad_norm >= 0:
line = line[start_grad_norm:]
line = line.replace('gradient norm = ', '')
gradient_norm_values.append(float(line))
# Compute how often the gradient norm is smaller than min_grad_norm
gradient_norm_values = np.array(gradient_norm_values)
n_smaller_gradient_norms = \
len(gradient_norm_values[gradient_norm_values <= min_grad_norm])
# The gradient norm can be smaller than min_grad_norm at most once,
# because in the moment it becomes smaller the optimization stops
assert_less_equal(n_smaller_gradient_norms, 1)
| mit |
terkkila/scikit-learn | sklearn/preprocessing/tests/test_label.py | 35 | 18559 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
@ignore_warnings
def test_label_binarizer_column_y():
# first for binary classification vs multi-label with 1 possible class
# lists are multi-label, array is multi-class :-/
inp_list = [[1], [2], [1]]
inp_array = np.array(inp_list)
multilabel_indicator = np.array([[1, 0], [0, 1], [1, 0]])
binaryclass_array = np.array([[0], [1], [0]])
lb_1 = LabelBinarizer()
out_1 = lb_1.fit_transform(inp_list)
lb_2 = LabelBinarizer()
out_2 = lb_2.fit_transform(inp_array)
assert_array_equal(out_1, multilabel_indicator)
assert_array_equal(out_2, binaryclass_array)
# second for multiclass classification vs multi-label with multiple
# classes
inp_list = [[1], [2], [1], [3]]
inp_array = np.array(inp_list)
# the indicator matrix output is the same in this case
indicator = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]])
lb_1 = LabelBinarizer()
out_1 = lb_1.fit_transform(inp_list)
lb_2 = LabelBinarizer()
out_2 = lb_2.fit_transform(inp_array)
assert_array_equal(out_1, out_2)
assert_array_equal(out_2, indicator)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
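        # Sparse output is only supported when neg_label is 0 and pos_label
        # is non-zero (otherwise the "absent" entries could not be left
        # unstored), so those combinations are expected to raise ValueError.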
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
jorik041/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
    # some libs need cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
    # the following packages depend on cblas, so they have to be built
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
gclenaghan/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 17 | 21089 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
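    # The perplexity of row i is exp(H(P_i)), the exponential of the Shannon
    # entropy taken with natural logs, which is what the list comprehension
    # below computes before averaging.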
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 500
desired_perplexity = 25.0
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
P1 = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
# Test that when we use all the neighbors the results are identical
k = n_samples
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2 = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
assert_array_almost_equal(P1, P2, decimal=4)
# Test that the highest P_ij are the same when few neighbors are used
for k in np.linspace(80, n_samples, 10):
k = int(k)
topn = k * 10 # check the top 10 *k entries out of k * k entries
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2k = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
k = 10
n_samples = 100
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
last_P = None
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
3, verbose=0)
P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
# The Barnes-Hut approximation uses a different method to estimate
    # P_ij using only a number of nearest neighbors instead of all
# points (so that k = 3 * perplexity). As a result we set the
# perplexity=5, so that the number of neighbors is 5%.
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(100, n_components).astype(np.float32)
for init in ('random', 'pca'):
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=50,
learning_rate=100.0, init=init, random_state=0,
method=method)
X_embedded = tsne.fit_transform(X)
T = trustworthiness(X, X_embedded, n_neighbors=1)
assert_almost_equal(T, 1.0, decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method='exact')
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
metric="precomputed", random_state=0, verbose=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
m = "'init' must be 'pca', 'random' or a NumPy array"
assert_raises_regexp(ValueError, m, TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
# Skip num points should make it such that the Barnes_hut gradient
# is not calculated for indices below skip_num_point.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
_barnes_hut_tsne.gradient(pij_input, pos_output, neighbors,
grad_bh, 0.5, 2, 1, skip_num_points=0)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
def test_no_sparse_on_barnes_hut():
# No sparse matrices allowed on Barnes-Hut.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_iter=199, method='barnes_hut')
assert_raises_regexp(TypeError, "A sparse matrix was.*",
tsne.fit_transform, X_csr)
def test_64bit():
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
methods = ['barnes_hut', 'exact']
for method in methods:
for dt in [np.float32, np.float64]:
X = random_state.randn(100, 2).astype(dt)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method)
tsne.fit_transform(X)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
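    # With angle (theta) = 0 no cell of the quad tree is ever summarized, so
    # every pairwise interaction is evaluated and the Barnes-Hut gradient
    # should match the exact O(N^2) implementation.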
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
distances = random_state.randn(n_samples, n_features)
distances = distances.astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, False)
kl, gradex = _kl_divergence(params, P, degrees_of_freedom, n_samples,
n_components)
k = n_samples - 1
bt = BallTree(distances)
distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
Pbh = _joint_probabilities_nn(distances, neighbors_nn,
perplexity, False)
kl, gradbh = _kl_divergence_bh(params, Pbh, neighbors_nn,
degrees_of_freedom, n_samples,
n_components, angle=angle,
skip_num_points=0, verbose=False)
assert_array_almost_equal(Pbh, P, decimal=5)
assert_array_almost_equal(gradex, gradbh, decimal=5)
def test_quadtree_similar_point():
# Introduce a point into a quad tree where a similar point already exists.
# Test will hang if it doesn't complete.
Xs = []
# check the case where points are actually different
Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
# check the case where points are the same on X axis
Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
# check the case where points are arbitrarily close on X axis
Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
# check the case where points are the same on Y axis
Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
# check the case where points are arbitrarily close on Y axis
Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
    # check the case where points are arbitrarily close on both axes
Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
dtype=np.float32))
    # check the case where points are arbitrarily close on both axes
# close to machine epsilon - x axis
Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
dtype=np.float32))
    # check the case where points are arbitrarily close on both axes
# close to machine epsilon - y axis
Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
dtype=np.float32))
for X in Xs:
counts = np.zeros(3, dtype='int64')
_barnes_hut_tsne.check_quadtree(X, counts)
m = "Tree consistency failed: unexpected number of points at root node"
assert_equal(counts[0], counts[1], m)
m = "Tree consistency failed: unexpected number of points on the tree"
assert_equal(counts[0], counts[2], m)
def test_index_offset():
    # Make sure translation between 1D and N-D indices is preserved
assert_equal(_barnes_hut_tsne.test_index2offset(), 1)
assert_equal(_barnes_hut_tsne.test_index_offset(), 1)
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/series/test_timezones.py | 2 | 1296 | """
Tests for Series timezone-related methods
"""
from datetime import datetime
from dateutil.tz import tzoffset
import numpy as np
import pytest
from pandas import Series
import pandas._testing as tm
from pandas.core.indexes.datetimes import date_range
class TestSeriesTimezones:
def test_dateutil_tzoffset_support(self):
values = [188.5, 328.25]
tzinfo = tzoffset(None, 7200)
index = [
datetime(2012, 5, 11, 11, tzinfo=tzinfo),
datetime(2012, 5, 11, 12, tzinfo=tzinfo),
]
series = Series(data=values, index=index)
assert series.index.tz == tzinfo
# it works! #2443
repr(series.index[0])
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize(
"method, tz", [["tz_localize", None], ["tz_convert", "Europe/Berlin"]]
)
def test_tz_localize_convert_copy_inplace_mutate(self, copy, method, tz):
# GH 6326
result = Series(
np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=tz)
)
getattr(result, method)("UTC", copy=copy)
expected = Series(
np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=tz)
)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
felipessalvatore/MyTwitterBot | src/agent/Bot.py | 1 | 11963 | import tweepy
import pandas as pd
import time
import numpy as np
try:
from key import ConsumerKey, ConsumerSecret
from key import AccessToken, AccessTokenSecret
except ImportError:
from agent.key import ConsumerKey, ConsumerSecret
from agent.key import AccessToken, AccessTokenSecret
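# A minimal sketch of the expected "key.py" module (placeholder values only,
# substitute your own credentials):
#
#     ConsumerKey = "<consumer key>"
#     ConsumerSecret = "<consumer secret>"
#     AccessToken = "<access token>"
#     AccessTokenSecret = "<access token secret>"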
import os
import sys
import inspect
from requests_oauthlib import OAuth1Session
import json
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from utils import get_real_friends, get_date, get_date_and_time
from twitter.TweetGenerator import TweetGenerator
from twitter.functions import TweetValid
from text_processing.functions import file_len
class Bot():
"""
The autonomous agent behind the twitter account.
This class assumes that you have the file "key.py"
in the folder "agent".
In "key.py" I assume you have the variables:
"ConsumerKey" , "ConsumerSecret", "AccessToken"
and "AccessTokenSecret". For more info on how to get
the value of these variables go watch this video on
youtube https://www.youtube.com/watch?v=M7MqML2ZVOY
:type corpus: str
:type friends: list of str
    :type commentary: str
:type black_list: list
:type local: str
:type hashtag_search: None or list
"""
def __init__(self,
corpus,
friends=[],
commentary="None",
black_list=[],
local="world",
hashtag_search=None):
self.black_list = black_list
self.local = local
self.friends = friends
self.corpus = corpus
auth = tweepy.OAuthHandler(ConsumerKey, ConsumerSecret)
auth.set_access_token(AccessToken, AccessTokenSecret)
self.api = tweepy.API(auth)
entry = [("Date", [get_date()]),
("Followers", [len(self.api.followers_ids())]),
("Following", [len(self.api.friends_ids())]),
("Commentary", [commentary])]
self.df = pd.DataFrame.from_items(entry)
self.log()
if hashtag_search is None:
self.hashtag_search = self.get_trends(self.local)
else:
self.hashtag_search = hashtag_search + self.get_trends(self.local)
def clear_follow(self,
Realfriends=get_real_friends()):
"""
        Method to unfollow all the accounts that the bot follows
        that are not in the list "Realfriends".
:type Realfriends: list of int
"""
friends = self.api.friends_ids()
for friend in friends:
if friend not in Realfriends:
self.api.destroy_friendship(friend)
def log(self):
"""
        Method to save the account stats (followers, following,
        commentary) to a csv for future reference.
"""
log_folder = os.path.join(os.getcwd(), "twitter_log")
csv_name = os.path.join(log_folder, "stats.csv")
if not os.path.exists(log_folder):
os.makedirs(log_folder)
try:
old_df = pd.read_csv(csv_name)
new_df = old_df.append(self.df, ignore_index=True)
new_df.to_csv(csv_name, index=False)
except OSError:
self.df.to_csv(csv_name, index=False)
def get_local_identifier(self):
"""
        Method to get a dict mapping local name to identifier.
        The identifier is of type WOEID (Where On Earth IDentifier).
:rtype: dict
"""
WOEID = {"world": "1",
"EUA": "23424977",
"Brazil": "23424768"}
return WOEID
def get_trends(self, local):
"""
Method to get the trending hashtags.
:type local: str
:rtype: list of str
"""
session_string = "https://api.twitter.com/1.1/trends/place.json?id="
local_id = self.get_local_identifier()[local]
session_string += local_id
session = OAuth1Session(ConsumerKey,
ConsumerSecret,
AccessToken,
AccessTokenSecret)
response = session.get(session_string)
if response.__dict__['status_code'] == 200:
local_trends = json.loads(response.text)[0]["trends"]
hashtags = [trend["name"]
for trend in local_trends if trend["name"][0] == '#']
else:
hashtags = []
return hashtags
def curator_writer(self,
num_tweets,
show_tweets=10,
num_hashtags=5):
"""
        Method to write "num_tweets" tweets. Inside a loop the user
        is prompted to choose one of the generated tweets.
        At the end of the loop the method writes a txt file with
        all the saved tweets. The trending hashtags and the bot's
        friends are used to compose the tweets.
:type num_tweets: int
:type num_hashtags: int
:rtype: str
"""
saved_tweets = []
tg = TweetGenerator(text_path=self.corpus,
black_list=self.black_list,
train=False)
while len(saved_tweets) < num_tweets:
print(('=-=' * 5))
print("You have {} saved tweets so far.".format(len(saved_tweets)))
print("Type the beginning of a tweet")
print(('=-=' * 5))
first_part = input('> ')
if not TweetValid(first_part):
first_part = '<eos>'
print("Too long!!\nstarting text = <eos>")
hashtags = self.get_trends(self.local)
hashtags_and_friends = self.friends + hashtags
h_and_f_size = len(hashtags_and_friends)
if h_and_f_size < num_hashtags:
num_hashtags = max(len(hashtags_and_friends) - 1, 1)
print("Picking only {} hashtags".format(num_hashtags))
if h_and_f_size > 0:
choice = np.random.choice(h_and_f_size, num_hashtags)
my_hashtags = [hashtags_and_friends[i] for i in choice]
else:
my_hashtags = []
tweets = tg.generate_tweet_list(number_of_tweets=show_tweets,
starting_text=first_part,
hashtag_list=my_hashtags)
for i, tweet in enumerate(tweets):
print("{0}) {1}".format(i, tweet))
user_choice = -1
number_of_tweets = len(tweets)
while True:
print(('=-=' * 5))
print("Choose one tweet!")
print("Type a number from 0 to {}".format(number_of_tweets - 1))
print("Or type -99 to generate other tweets")
print(('=-=' * 5))
user_choice = input('> ')
try:
user_choice = int(user_choice)
except ValueError:
print("Oops! That was no valid number.")
if user_choice == -99 or user_choice in range(number_of_tweets):
break
if user_choice >= 0:
saved_tweets.append(tweets[user_choice])
draft_folder = os.path.join(os.getcwd(), "twitter_draft")
filename = os.path.join(draft_folder, get_date_and_time() + ".txt")
if not os.path.exists(draft_folder):
os.makedirs(draft_folder)
with open(filename, "w") as f:
for tweet in saved_tweets:
f.write(tweet + "\n")
return filename
def post_from_txt(self,
text_path,
minutes_paused=2,
num_tweets_to_see=51):
"""
Method to post all the tweets from the txt in "text_path".
        Each tweet is posted; after that the bot starts
        liking tweets that have the same hashtags as the ones in the list
        self.hashtag_search, and it also retweets the tweets and follows the
        users. After each interaction it pauses for "minutes_paused" minutes
        (default is 2 minutes).
:type text_path: str
:type minutes_paused: int
:type num_tweets_to_see: int
"""
seconds_pause = minutes_paused * 60
num_tweets = file_len(text_path)
with open(text_path) as file:
for i, tweet in enumerate(file):
if TweetValid(tweet):
print("Posting {0} from {1}".format(i, num_tweets))
self.api.update_status(tweet)
choice = np.random.choice(len(self.hashtag_search), 1)[0]
current_hashtag = self.hashtag_search[choice]
print("\ncurrent hashtag is {}".format(current_hashtag))
count = 0
for tweet in tweepy.Cursor(self.api.search,
q=current_hashtag).items():
print("\ncount = {}".format(count))
if count < num_tweets_to_see:
try:
# Favorite the tweet
tweet.favorite()
print('Favorited the tweet')
# Follow the user who tweeted
tweet.user.follow()
print('Followed the user')
if count % 25 == 0:
tweet.retweet()
print('Retweeted the tweet')
print("\nWaiting {} minutes".format(minutes_paused))
time.sleep(seconds_pause)
count += 1
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
print("No more tweets for the hashtag = {}".format(current_hashtag))
break
else:
print("\ncount = {}, above upper bound".format(count))
break
def write(self,
num_tweets,
first_part='<eos>',
num_hashtags=5,
minutes_pause=60,
publish=True):
"""
        Method to write "num_tweets" tweets, using the string
        "first_part" as the beginning of the tweet and
        using "num_hashtags" hashtags.
Each tweet is posted after a pause of
"minutes_pause" minutes (default is one hour).
:type num_tweets: int
:type num_hashtags: int
:type minutes_pause: int
:type publish: boolean
"""
seconds_pause = minutes_pause * 60
tg = TweetGenerator(text_path=self.corpus,
black_list=self.black_list,
train=False)
for i in range(num_tweets):
trends = self.api.trends_place(1)[0]['trends']
TrendsNames = [trend['name'] for trend in trends]
hashtags = [words for words in TrendsNames if words[0] == "#"]
if len(hashtags) < num_hashtags:
num_hashtags = max(len(hashtags) - 1, 1)
print("Picking only {} hashtags".format(num_hashtags))
choice = np.random.choice(len(hashtags), num_hashtags)
my_hashtags = [hashtags[i] for i in choice]
tweet = tg.generate_tweet_list(starting_text=first_part,
hashtag_list=my_hashtags)[0]
print("\nThe {} tweet is:\n".format(i), tweet)
if publish:
self.api.update_status(tweet)
print("Waiting {} minutes".format(minutes_pause))
time.sleep(seconds_pause)
| mit |
JohnCEarls/tcdiracweb | tcdiracweb/utils/maketsv.py | 1 | 18620 | from datadirac import data
import itertools
import pandas
from collections import defaultdict
import numpy as np
import os.path
import json
import boto
from boto.s3.key import Key
import cPickle as pickle
import random
import re
import string
import masterdirac.models.run as run_mdl
opj = os.path.join
class TSVGen:
def __init__(self, net_table, net_source_id, source_dataframe, metadata_file, app_path, source_bucket, data_path):
self._net_table = net_table
self._net_source_id = net_source_id
self.df = source_dataframe
self.meta = metadata_file
self._app_path = app_path
self._data_path = data_path
self._source_bucket = source_bucket
self._local_data_path = opj( self._app_path, self._data_path.strip('/') )
self.getData()
self.loadData()
def getData(self):
self._download_data( self.df )
self._download_data( self.meta )
def _download_data(self, fname):
if not os.path.exists( opj( self._local_data_path, fname ) ):
s3 = boto.connect_s3()
b = s3.get_bucket(self._source_bucket)
k = Key(b)
k.key = fname
f = self.strip_path( fname )
k.get_contents_to_filename( opj( self._local_data_path, f ) )
def strip_path( self, key_name):
p, f = os.path.split( key_name )
return f
def loadData( self ):
sd = data.SourceData()
sd.load_dataframe( opj( self._local_data_path, self.strip_path(self.df)) )
sd.load_net_info(self._net_table, self._net_source_id )
mi = data.MetaInfo( opj( self._local_data_path, self.strip_path(self.meta) ) )
self._sd = sd
self._mi = mi
def jitter( self, ages, order=.001):
"""
Given a list of ages, adjust each randomly by a small amount.
This is done to make each age unique, if we don't want aggregation.
"""
return [age + (random.random()*order) for age in ages]
def gen_bivariate( self, pathway, by_rank=False, jitter=True ):
genes = self._sd.get_genes( pathway )
descriptions = []
web_path = self._data_path
by_rank = False
for i in range(2):
for strain in self._mi.get_strains():
alleles = self._mi.get_nominal_alleles( strain )
for a1, a2 in itertools.combinations(alleles,2):
sid1 = self._mi.get_sample_ids( strain, a1)
ages1 = [self._mi.get_age( s ) for s in sid1]
if jitter:
ages1 = self.jitter(ages1)
sid2 = self._mi.get_sample_ids( strain, a2)
ages2 = [self._mi.get_age( s ) for s in sid2]
if jitter:
ages2 = self.jitter(ages2)
sub1 = self._sd.get_expression( sid1 )
pw_sub1 = sub1.loc[genes,:]
if by_rank:
pw_sub1T = pw_sub1.transpose().rank(axis=1, ascending=False)
else:
pw_sub1T = pw_sub1.transpose()
series_1 = {}
for gene in genes:
a2s_map = defaultdict(list)
for a,sid in zip(ages1,sid1):
a2s_map[a].append(sid)
new_series = pandas.Series(np.zeros(len(a2s_map)), index=a2s_map.keys())
for a,samps in a2s_map.iteritems():
new_series.ix[a] = pw_sub1T[gene].ix[ samps ].median()
new_series.name = "%s" % (a1,)
series_1[gene] = new_series
sub2 = self._sd.get_expression( sid2 )
pw_sub2 = sub2.loc[genes,:]
if by_rank:
pw_sub2T = pw_sub2.transpose().rank(axis=1, ascending=False)
else:
pw_sub2T = pw_sub2.transpose()
series_2 = {}
for gene in genes:
a2s_map = defaultdict(list)
for a,sid in zip(ages2,sid2):
a2s_map[a].append(sid)
new_series = pandas.Series(np.zeros(len(a2s_map)), index=a2s_map.keys())
for a,samps in a2s_map.iteritems():
new_series.ix[a] = pw_sub2T[gene].ix[ samps ].median()
new_series.name = "%s" % (a2,)
series_2[gene] = new_series
avg_rank = 0
for gene in genes:
a,b = series_1[gene].align(series_2[gene])
a = a.interpolate().bfill().ffill()
b = b.interpolate().bfill().ffill()
q = pandas.DataFrame(a)
q = q.join(b)
series_1[gene].name = series_1[gene].name + '-true';
series_2[gene].name = series_2[gene].name + '-true';
q = q.join(series_1[gene]);
q = q.join(series_2[gene]);
if by_rank:
res_type= 'rank'
else:
res_type='expression'
fname = "%s-%s-%s-%s.tsv" % (res_type, strain, gene,
'-V-'.join(q.columns))
q.to_csv(opj(self._local_data_path, fname), sep='\t',
index_label="age", na_rep='null')
if by_rank:
fname = "%s-%s-%s.tsv" % (strain, gene,
'-V-'.join(q.columns))
description = {
'filename-rank' : os.path.join(web_path, 'rank-'+fname),
'filename-expression' : os.path.join( web_path, 'expression-' + fname),
'strain': strain,
'gene': gene,
'age' : 'age',
'base' : a1,
'baseLong': a1,
'comp' : a2,
'compLong':a2,
'avg_rank': a.mean()
}
descriptions.append(description)
by_rank = True
return json.dumps( sorted( descriptions , key=lambda x: x['avg_rank'] ))
def genNetworkGeneExpTables(self, pathway, by_rank=False):
genes = self._sd.get_genes( pathway )
web_path = self._data_path
rstr = 'rank' if by_rank else 'exp'
tables = {'type': rstr,
'pathway': pathway
}
for strain in self._mi.get_strains():
tables[strain] = {}
alleles = self._mi.get_nominal_alleles( strain )
for allele in self._mi.get_nominal_alleles( strain ):
tables[strain][allele] = {}
sid = self._mi.get_sample_ids( strain, allele)
ages = [self._mi.get_age( s ) for s in sid]
ages = self.jitter(ages)
sub = self._sd.get_expression( sid )
pw_sub = sub.loc[genes,:]
s_a_map = dict([(s,a) for s,a in zip( sid, ages )])
pw_sub.rename( columns=s_a_map, inplace=True )
pw_sub = pw_sub.reindex_axis(sorted(pw_sub.columns), axis=1)
if by_rank:
pw_subT = pw_sub.transpose().rank(axis=1, ascending=False)
else:
pw_subT = pw_sub.transpose()
tables[strain][allele]['samples'] = [sn for a,sn in sorted(zip(ages,sid))]
tables[strain][allele]['ages'] = pw_subT.index.tolist()
tables[strain][allele]['genes'] = pw_subT.columns.tolist()
tables[strain][allele]['table'] = pw_subT.values.tolist()
return tables
from datadirac.aggregate import DataForDisplay
import tempfile
from datadirac.utils import stat
import boto
from boto.s3.key import Key
class NetworkTSV:
def __init__( self ):
pass
def _available(self):
res = DataForDisplay.scan()
results = {}
for r in res:
results[r.identifier +'-' +r.timestamp] = r.attribute_values
return results
def get_display_info(self, select=[], display_vars=['network', 'description', 'alleles', 'strains'] ):
always = ['identifier', 'timestamp']
        _vars = always + display_vars
result = []
current = self._available()
if select:
for s_id in select:
if s_id in current:
selected = {'id':s_id}
for v in _vars:
if type( current[s_id][v] ) is set:
selected[v] = list( current[s_id][v] )
else:
selected[v] = current[s_id][v]
result.append(selected)
else:
for s_id in current.keys() :
if s_id in current:
selected = {'id':s_id}
for v in _vars:
if type( current[s_id][v] ) is set:
selected[v] = list( current[s_id][v] )
else:
selected[v] = current[s_id][v]
result.append(selected)
return result
def set_qval_table( self, identifier, timestamp ):
res = DataForDisplay.get(identifier, timestamp)
s3 = boto.connect_s3()
bucket = s3.get_bucket( res.data_bucket )
k = bucket.get_key( res.data_file )
with tempfile.TemporaryFile() as fp:
k.get_contents_to_file(fp)
fp.seek(0)
qv = stat.get_qval_table(fp)
with tempfile.TemporaryFile() as fp2:
qv.to_csv( fp2, sep='\t', index_label='networks' )
fp2.seek(0)
k = Key(bucket)
k.key = 'qvals-' + identifier + '-' + timestamp + '.tsv'
k.set_contents_from_file(fp2)
res.qvalue_file = 'qvals-' + identifier + '-' + timestamp + '.tsv'
res.save()
def get_fdr_cutoffs( self, identifier, timestamp, alphas=[.05]):
"""
        FDR cutoffs by Benjamini-Hochberg.
"""
res = DataForDisplay.get(identifier, timestamp)
s3 = boto.connect_s3()
bucket = s3.get_bucket( res.data_bucket )
k = bucket.get_key( res.data_file )
with tempfile.TemporaryFile() as fp:
k.get_contents_to_file(fp)
fp.seek(0)
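            # Benjamini-Hochberg sketch: with the m p-values of a column
            # sorted in ascending order, the cutoff for a given alpha is the
            # largest p_(k) satisfying p_(k) <= (k / m) * alpha; callers such
            # as get_sig() then keep every network whose p-value is at or
            # below that cutoff.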
res = stat.get_fdr_cutoffs(fp, alphas=alphas)
return res
from pynamodb.models import Model
from pynamodb.attributes import UnicodeAttribute
class NetworkInfo(Model):
table_name = 'net_info_table'
src_id = UnicodeAttribute(hash_key=True)
pw_id = UnicodeAttribute(range_key=True)
broad_url=UnicodeAttribute(default='')
gene_ids=UnicodeAttribute(default='')
class CrossTalkMatrix:
"""
Dataframe where df[a,b] says what percent of a is shared with b
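    For example (hypothetical gene sets): if network A = {g1, g2, g3, g4}
    and network B = {g3, g4}, then df[A, B] = 2/4 = 0.5 while
    df[B, A] = 2/2 = 1.0, so the matrix is generally not symmetric.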
"""
def __init__(self):
self.edge_list = defaultdict(set)
def generate(self, networks=[]):
"""
networks is a list of tuples (src_id, pw_id)
"""
if networks:
for item in NetworkInfo.batch_get(networks):
self.add_item( item )
else:
for item in NetworkInfo.scan():
self.add_item(item)
n = len(self.edge_list)
self.cross_talk = pandas.DataFrame(np.zeros((n,n)),
index=self.edge_list.keys(), columns=self.edge_list.keys())
for index, igeneset in self.edge_list.iteritems():
for column, ggeneset in self.edge_list.iteritems():
self.cross_talk.at[index, column] = len(igeneset.intersection( ggeneset )) / float(len(igeneset))
return self.cross_talk
def add_item(self, item ):
net_string = item.gene_ids
self.edge_list[item.pw_id] = set(net_string[6:].strip().split('~:~'))
def read_pickle(self, file_name):
self.cross_talk = pandas.read_pickle(file_name)
def get_crosstalk(self, networks, bucket=None, file_name=None):
if not bucket:
return self.generate( networks )
else:
conn = boto.connect_s3()
b = conn.get_bucket(bucket)
k = b.get_key(file_name)
with tempfile.SpooledTemporaryFile() as f:
k.get_contents_to_file(f)
f.seek(0)
self.cross_talk = pickle.load(f)
if not isinstance(networks[0], basestring):
networks = [n for _,n in networks]
return self.cross_talk.loc[networks, networks]
from datadirac.aggregate import RunGPUDiracModel, DataForDisplay
import base64
import json
import pprint
import boto
import tempfile
import pandas
import json
import numpy as np
def get_sig(run_id, sig_level = .05):
"""
Returns the significant networks at the given significance by
Benjamini-Hochberg
"""
myitem = None
for item in DataForDisplay.query(run_id):
if not myitem:
myitem = item
elif myitem and item.timestamp > myitem.timestamp:
myitem = item
bucket = myitem.data_bucket
pv_file = myitem.data_file
conn = boto.connect_s3()
b = conn.get_bucket(bucket)
k = b.get_key(pv_file)
with tempfile.TemporaryFile() as fp:
k.get_contents_to_file(fp)
fp.seek(0)
table = pandas.read_csv(fp, sep='\t')
nv = NetworkTSV()
cutoffs = nv.get_fdr_cutoffs( myitem.identifier, myitem.timestamp, [sig_level] )
valid = []
for k,v in cutoffs.iteritems():
for cut in v.itervalues():
valid += table[table[k] <= cut]['networks'].tolist()
return list(set(valid))
def dumpExpression():
runs = {}
ignore = ['black_6_go_wt_v_q111']
for item in RunGPUDiracModel.scan():
if item.run_id not in ignore:
runs[item.run_id] = json.loads(base64.b64decode( item.config ))
for k in runs.keys():
net_table = runs[k]['network_config']['network_table']
net_source_id = runs[k]['network_config']['network_source']
source_dataframe = runs[k]['dest_data']['dataframe_file']
metadata_file = runs[k]['dest_data']['meta_file']
source_bucket = runs[k]['dest_data']['working_bucket']
app_path = '/home/sgeadmin/.local/lib/python2.7/site-packages/tcdiracweb-0.1.0-py2.7.egg/tcdiracweb'
data_path = 'static/data'
runs['tsvargs'] = (net_table, net_source_id, source_dataframe, metadata_file, app_path, source_bucket, data_path)
runs['sig_nets'] = get_sig(k)
t = TSVGen( *runs['tsvargs'] )
for pw in runs['sig_nets']:
for rank in [True,False]:
for j in [True,False]:
print t.genNetworkGeneExpTables(pw, by_rank=rank)
return
print pw
def get_expression_from_run( run_id, timestamp, pathway, by_rank ):
    res = RunGPUDiracModel.get( run_id, timestamp )
config = json.loads(base64.b64decode( res.config ))
net_table = config['network_config']['network_table']
net_source_id = config['network_config']['network_source']
source_dataframe = config['dest_data']['dataframe_file']
metadata_file = config['dest_data']['meta_file']
source_bucket = config['dest_data']['working_bucket']
app_path = '/home/sgeadmin/.local/lib/python2.7/site-packages/tcdiracweb-0.1.0-py2.7.egg/tcdiracweb'
data_path = 'static/data'
args = (net_table, net_source_id, source_dataframe, metadata_file,
app_path, source_bucket, data_path)
tsv = TSVGen( * args )
    return tsv.genNetworkGeneExpTables( pathway, by_rank=by_rank )
def dataframe_to_backgrid( dataframe, type_map={}, sorter=None ):
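    # Sketch of the output shape for a hypothetical one-column frame with a
    # float column named "q_value" and a string index:
    #   {'columns': [{'name': 'id', 'label': 'Index', 'editable': False,
    #                 'cell': 'string'},
    #                {'name': 'q_value', 'label': 'Q Value', 'cell': 'number'}],
    #    'table': [{'id': 'NET_A', 'q_value': 0.01}, ...]}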
columns = []
index_name = dataframe.index.name
def df2bgtype( column ):
dftype = column.dtype
name = column.name
if name in type_map:
return type_map[name]
if dftype == object:
return 'string'
elif dftype == int:
return 'integer'
elif dftype == float:
return 'number'
def pretty_name( ugly_name ):
prettier = ' '.join(re.split( r'[_-]', ugly_name))
return string.capwords( prettier )
columns.append({ 'name':'id',
'label': index_name if index_name else 'Index',
'editable': False,
'cell' : df2bgtype(dataframe.index)
})
for i in range(len(dataframe.columns)):
columns.append({'name': dataframe.iloc[:,i].name,
'label': pretty_name(dataframe.iloc[:,i].name),
'cell': df2bgtype( dataframe.iloc[:,i] )
})
table = []
for indx in dataframe.index:
row = {'id':indx}
for col in dataframe.columns:
row[col] = dataframe.at[indx,col]
table.append(row)
return { 'columns' : columns, 'table': table }
if __name__ == "__main__":
dumpExpression()
"""
base = "/home/earls3/secondary/tcdiracweb/tcdiracweb/static/data"
#t = TSVGen( base + "/exp_mat_b6_wt_q111.pandas", base + "/metadata_b6_wt_q111.txt")
#t.genBivariate('HISTONE_MODIFICATION')
conn = boto.connect_s3()
bucket = conn.get_bucket('ndp-hdproject-csvs')
k = Key(bucket)
k.key = 'crosstalk-biocartaUkeggUgoUreactome-pandas-dataframe.pkl'
k.set_contents_from_filename('ct.pkl')"""
"""
ntsv = NetworkTSV()
di = ntsv.get_display_info()
for k in di:
print ntsv.set_qval_table(k['identifier'], k['timestamp'])
print ntsv.get_fdr_cutoffs(k['identifier'], k['timestamp'], alphas=[.05])
cm = CrossTalkMatrix()
ctm = cm.generate( networks=[('c2.cp.kegg.v4.0.symbols.gmt', 'KEGG_LEISHMANIA_INFECTION'),
('c2.cp.biocarta.v4.0.symbols.gmt', 'BIOCARTA_41BB_PATHWAY'),
('c2.cp.biocarta.v4.0.symbols.gmt', 'BIOCARTA_ACTINY_PATHWAY')] )
network = ['KEGG_LEISHMANIA_INFECTION', 'BIOCARTA_41BB_PATHWAY', 'BIOCARTA_ACTINY_PATHWAY']
print cm.get_crosstalk(network, bucket='ndp-hdproject-csvs',
file_name='crosstalk-biocartaUkeggUgoUreactome-pandas-dataframe.pkl')"""
| agpl-3.0 |
terhorst/psmcpp | smcpp/analysis/analysis.py | 2 | 5816 | import json
import numpy as np
import scipy.stats.mstats
import sklearn.mixture
import sys
from .. import estimation_tools, _smcpp, util, logging, spline, data_filter, beta_de
from ..model import SMCModel
from . import base
import smcpp.defaults
from smcpp.optimize.optimizers import SMCPPOptimizer
from smcpp.optimize.plugins import analysis_saver, parameter_optimizer
logger = logging.getLogger(__name__)
class Analysis(base.BaseAnalysis):
"""A dataset, model and inference manager to be used for estimation."""
def __init__(self, files, args):
super().__init__(files, args)
if self.npop != 1:
logger.error("Please use 'smc++ split' to estimate two-population models")
sys.exit(1)
NeN0 = self._pipeline["watterson"].theta_hat / (2. * args.mu * self._N0)
m = SMCModel([1.], self._N0, spline.Piecewise, None)
m[:] = np.log(NeN0)
hs = estimation_tools.balance_hidden_states(m, 2 + args.knots)
if args.timepoints is not None:
t1, tK = [x / 2 / self._N0 for x in args.timepoints]
else:
t1 = tK = None
hs /= (2 * self._N0)
self.hidden_states = hs
self._init_knots(hs, t1, tK)
self._init_model(args.spline)
hs0 = hs
self.hidden_states = [0., np.inf]
self._init_inference_manager(args.polarization_error, self.hidden_states)
self.alpha = 1
self._model[:] = np.log(NeN0)
self._model.randomize()
self._init_optimizer(
args.outdir,
args.base,
args.algorithm,
args.xtol,
args.ftol,
learn_rho=False,
single=False
)
self._init_regularization(args)
self.run(1)
pipe = self._pipeline
pipe.add_filter(data_filter.Thin(thinning=args.thinning))
pipe.add_filter(data_filter.BinObservations(w=args.w))
pipe.add_filter(data_filter.RecodeMonomorphic())
pipe.add_filter(data_filter.Compress())
pipe.add_filter(data_filter.Validate())
pipe.add_filter(data_filter.DropUninformativeContigs())
pipe.add_filter(data_filter.Summarize())
try:
self._empirical_tmrca(2 * args.knots)
hs = np.r_[0., self._etmrca_quantiles, np.inf]
except Exception as e:
logger.warn("Mixture model failed for setting hidden states. Error was: %s", e)
hs = estimation_tools.balance_hidden_states(m, 2 * args.knots) / 2 / self._N0
self.hidden_states = hs
self._init_knots(hs, t1, tK)
m = self._model
self._init_model(args.spline)
self._model[:] = np.log(m(self._knots))
self._init_inference_manager(args.polarization_error, self.hidden_states)
self.alpha = args.w
self._init_optimizer(
args.outdir,
args.base,
args.algorithm,
args.xtol,
args.ftol,
learn_rho=args.r is None,
single=not args.multi,
)
self._init_regularization(args)
def _init_model(self, spline_class):
## Initialize model
logger.debug("knots in coalescent scaling:\n%s", str(self._knots))
spline_class = {
"cubic": spline.CubicSpline,
"bspline": spline.BSpline,
"akima": spline.AkimaSpline,
"pchip": spline.PChipSpline,
"piecewise": spline.Piecewise,
}[spline_class]
assert self.npop == 1
self._model = SMCModel(self._knots, self._N0, spline_class, self.populations[0])
def _init_knots(self, hs, t1, tK):
self._knots = hs[1:-1:2]
mult = np.mean(self._knots[1:] / self._knots[:-1])
k0 = self._knots[0]
t = t1 or k0
a = []
while t < k0:
a = np.r_[a, t]
t *= mult
self._knots = np.r_[a, self._knots]
if tK is not None and tK > self._knots[-1]:
self._knots = np.r_[self._knots, tK]
logger.debug("Knots are: %s", self._knots)
def _init_regularization(self, args):
if self._args.lambda_:
self._penalty = args.lambda_
else:
self._penalty = abs(self.Q()) * (10 ** -args.regularization_penalty)
logger.debug("Regularization penalty: lambda=%g", self._penalty)
_OPTIMIZER_CLS = SMCPPOptimizer
def _init_optimizer(self, outdir, base, algorithm, xtol, ftol, learn_rho, single):
super()._init_optimizer(outdir, base, algorithm, xtol, ftol, single)
if learn_rho:
rho_bounds = lambda: (self._theta / 100, 100 * self._theta)
self._optimizer.register_plugin(
parameter_optimizer.ParameterOptimizer("rho", rho_bounds)
)
def _empirical_tmrca(self, k):
'''Calculate the empirical distribution of TMRCA in the
distinguished lineages by counting mutations'''
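        # Under the infinite-sites approximation the expected number of
        # mutations in a window of w bases is roughly 2 * TMRCA * theta * w,
        # so dividing the fitted quantiles by (2 * theta * w) below converts
        # mutation counts into coalescent-scaled TMRCA values.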
w = self._pipeline['mutation_counts'].w
X = self._pipeline['mutation_counts'].counts
logger.debug("Computing quantiles of TMRCA distribution from M=%d TMRCA samples", len(X))
logger.debug("Unresampled quantiles (0/10/25/50/75/100): %s",
scipy.stats.mstats.mquantiles(X, [0, .1, .25, .5, .75, 1.]))
        # fit a k-component Gaussian mixture model to the mutation counts
gmm = sklearn.mixture.GaussianMixture(n_components=k).fit(X[:, None])
Y = gmm.sample(n_samples=100000)[0]
p = np.logspace(np.log10(.01), np.log10(.99), k)
q = scipy.stats.mstats.mquantiles(Y[Y>0], p) / (2 * self._theta * w)
logger.debug("Quantiles: %s", " ".join("F(%g)=%g" % c for c in zip(q, p)))
# 2 * E(TMRCA) * self._theta ~= q
logger.debug("empirical TMRCA distribution: %s", q)
self._etmrca_quantiles = q
| gpl-3.0 |
chanderbgoel/pybrain | examples/rl/valuebased/nfq.py | 25 | 1973 | from __future__ import print_function
#!/usr/bin/env python
__author__ = 'Thomas Rueckstiess, [email protected]'
from pybrain.rl.environments.cartpole import CartPoleEnvironment, DiscreteBalanceTask, CartPoleRenderer
from pybrain.rl.agents import LearningAgent
from pybrain.rl.experiments import EpisodicExperiment
from pybrain.rl.learners.valuebased import NFQ, ActionValueNetwork
from pybrain.rl.explorers import BoltzmannExplorer
from numpy import array, arange, meshgrid, pi, zeros, mean
from matplotlib import pyplot as plt
# switch this to True if you want to see the cart balancing the pole (slower)
render = False
plt.ion()
env = CartPoleEnvironment()
if render:
renderer = CartPoleRenderer()
env.setRenderer(renderer)
renderer.start()
module = ActionValueNetwork(4, 3)
task = DiscreteBalanceTask(env, 100)
learner = NFQ()
learner.explorer.epsilon = 0.4
agent = LearningAgent(module, learner)
testagent = LearningAgent(module, None)
experiment = EpisodicExperiment(task, agent)
def plotPerformance(values, fig):
plt.figure(fig.number)
plt.clf()
plt.plot(values, 'o-')
plt.gcf().canvas.draw()
# Without the next line, the pyplot plot won't actually show up.
plt.pause(0.001)
performance = []
if not render:
pf_fig = plt.figure()
while(True):
# one learning step after one episode of world-interaction
experiment.doEpisodes(1)
agent.learn(1)
# test performance (these real-world experiences are not used for training)
if render:
env.delay = True
experiment.agent = testagent
r = mean([sum(x) for x in experiment.doEpisodes(5)])
env.delay = False
testagent.reset()
experiment.agent = agent
performance.append(r)
if not render:
plotPerformance(performance, pf_fig)
print("reward avg", r)
print("explorer epsilon", learner.explorer.epsilon)
print("num episodes", agent.history.getNumSequences())
print("update step", len(performance))
| bsd-3-clause |
LeeKamentsky/CellProfiler | cellprofiler/modules/untangleworms.py | 2 | 126008 | '''<b>UntangleWorms</b> untangles overlapping worms.
<hr>
This module either assembles a training set of sample worms in order to create a worm
model, or takes a binary image and the results of worm training and
labels the worms in the image, untangling them and associating all of a
worm's pieces together.
The results of untangling the input image will be an object set that can be used with
downstream measurement modules. If using the <i>overlapping</i> style of objects, these
can be saved as images using <b>SaveImages</b> to create a multi-page TIF file by
specifying "Objects" as the type of image to save.
<h4>Available measurements</h4>
<b>Object measurements (for "Untangle" mode only)</b>:
<ul>
<li><i>Length:</i> The length of the worm skeleton. </li>
<li><i>Angle:</i> The angle at each of the control points</li>
<li><i>ControlPointX_N, ControlPointY_N:</i> The X,Y coordinate of a control point <i>N</i>.
A control point is a sampled location along the worm shape used to construct the model.</li>
</ul>
<h4>Technical notes</h4>
<i>Training</i> involves extracting morphological information from the sample objects
provided from the previous steps. Using the default training set weights is recommended.
Proper creation of the model is dependent on providing a binary image as input consisting
of single, separated objects considered to be worms. You can use the <b>Identify</b> modules
to find the tentative objects and then filter these objects to get individual worms, whether
by using <b>FilterObjects</b>, <b>EditObjectsManually</b> or the size criteria in
<b>IdentifyPrimaryObjects</b>. A binary image can be obtained from an object set by using
<b>ConvertObjectsToImage</b>.
<p>At the end of the training run, a final display window is shown displaying the following
statistical data:
<ul>
<li>A boxplot of the direction angle shape costs. The direction angles (which are between -π and π)
are the angles between lines joining consective control points. The angle 0 corresponds to
the case when two adjacent line segments are parallel (and thus belong to the same line).</li>
<li>A cumulative boxplot of the worm lengths as determined by the model.</li>
<li>A cumulative boxplot of the worm angles as determined by the model.</li>
<li>A heatmap of the covariance matrix of the feature vectors. For <i>N</i> control points,
the feature vector is of length <i>N</i>-1 and contains <i>N</i>-2 elements for each of the
angles between them, plus an element representing the worm length.</li>
</ul></p>
<p><i>Untangling</i> involves untangles the worms using a provided worm model, built
from a large number of samples of single worms. If the result of the untangling is
not satisfactory (e.g., it is unable to detect long worms or is too stringent about
shape variation) and you do not wish to re-train, you can adjust the provided worm model
manually by opening the .xml file in a text editor
and changing the values for the fields defining worm length, area etc. You may also want to adjust the
"Maximum Complexity" module setting which controls how complex clusters the untangling will handle.
Large clusters (> 6 worms) may be slow to process.</p>
<h4>References</h4>
<ul>
<li>Wählby C, Kamentsky L, Liu ZH, Riklin-Raviv T, Conery AL, O'Rourke EJ,
Sokolnicki KL, Visvikis O, Ljosa V, Irazoqui JE, Golland P, Ruvkun G,
Ausubel FM, Carpenter AE (2012). "An image analysis toolbox for high-throughput
<i>C. elegans</i> assays." <i>Nature Methods</i> 9(7): 714-716.
<a href="http://dx.doi.org/10.1038/nmeth.1984">(link)</a></li>
</ul>
'''
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Copyright (c) 2003-2009 Massachusetts Institute of Technology
# Copyright (c) 2009-2015 Broad Institute
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
import logging
import numpy as np
import matplotlib.mlab as mlab
import os
import scipy.ndimage as scind
from scipy.sparse import coo
from scipy.interpolate import interp1d
from scipy.io import loadmat
import sys
import xml.dom.minidom as DOM
import urllib2
logger = logging.getLogger(__name__)
import cellprofiler.cpmodule as cpm
import cellprofiler.measurements as cpmeas
import cellprofiler.cpimage as cpi
import cellprofiler.objects as cpo
import cellprofiler.settings as cps
from cellprofiler.settings import YES, NO
import cellprofiler.cpmath.cpmorphology as morph
import cellprofiler.preferences as cpprefs
import identify as I
from cellprofiler.cpmath.propagate import propagate
from cellprofiler.cpmath.outline import outline
from cellprofiler.preferences import standardize_default_folder_names, \
DEFAULT_INPUT_FOLDER_NAME, DEFAULT_OUTPUT_FOLDER_NAME, NO_FOLDER_NAME, \
ABSOLUTE_FOLDER_NAME, IO_FOLDER_CHOICE_HELP_TEXT
from cellprofiler.gui.help import USING_METADATA_GROUPING_HELP_REF
from cellprofiler.gui.help import RETAINING_OUTLINES_HELP, NAMING_OUTLINES_HELP
OO_WITH_OVERLAP = "With overlap"
OO_WITHOUT_OVERLAP = "Without overlap"
OO_BOTH = "Both"
MODE_TRAIN = "Train"
MODE_UNTANGLE = "Untangle"
'''Shape cost method = angle shape model for cluster paths selection'''
SCM_ANGLE_SHAPE_MODEL = 'angle_shape_model'
'''Maximum # of sets of paths considered at any level'''
MAX_CONSIDERED = 50000
'''Maximum # of different paths considered for input'''
MAX_PATHS = 400
'''Name of the worm training data list inside the image set'''
TRAINING_DATA = "TrainingData"
'''An attribute on the object names that tags them as worm objects'''
ATTR_WORM_MEASUREMENTS = "WormMeasurements"
######################################################
#
# Features measured
#
######################################################
'''Worm untangling measurement category'''
C_WORM = "Worm"
'''The length of the worm skeleton'''
F_LENGTH = "Length"
'''The angle at each of the control points (Worm_Angle_1 for example)'''
F_ANGLE = "Angle"
'''The X coordinate of a control point (Worm_ControlPointX_14 for example)'''
F_CONTROL_POINT_X = "ControlPointX"
'''The Y coordinate of a control point (Worm_ControlPointY_14 for example)'''
F_CONTROL_POINT_Y = "ControlPointY"
######################################################
#
# Training file XML tags:
#
######################################################
T_NAMESPACE = "http://www.cellprofiler.org/linked_files/schemas/UntangleWorms.xsd"
T_TRAINING_DATA = "training-data"
T_VERSION = "version"
T_MIN_AREA = "min-area"
T_MAX_AREA = "max-area"
T_COST_THRESHOLD = "cost-threshold"
T_NUM_CONTROL_POINTS = "num-control-points"
T_MEAN_ANGLES = "mean-angles"
T_INV_ANGLES_COVARIANCE_MATRIX = "inv-angles-covariance-matrix"
T_MAX_SKEL_LENGTH = "max-skel-length"
T_MAX_RADIUS = "max-radius"
T_MIN_PATH_LENGTH = "min-path-length"
T_MAX_PATH_LENGTH = "max-path-length"
T_MEDIAN_WORM_AREA = "median-worm-area"
T_OVERLAP_WEIGHT = "overlap-weight"
T_LEFTOVER_WEIGHT = "leftover-weight"
T_RADII_FROM_TRAINING = "radii-from-training"
T_TRAINING_SET_SIZE = "training-set-size"
T_VALUES = "values"
T_VALUE = "value"
C_ALL = "Process all clusters"
C_ALL_VALUE = np.iinfo(int).max
C_MEDIUM = "Medium"
C_MEDIUM_VALUE = 200
C_HIGH = "High"
C_HIGH_VALUE = 600
C_VERY_HIGH = "Very high"
C_VERY_HIGH_VALUE = 1000
C_CUSTOM = "Custom"
complexity_limits = {
C_ALL: C_ALL_VALUE,
C_MEDIUM: C_MEDIUM_VALUE,
C_HIGH: C_HIGH_VALUE,
C_VERY_HIGH: C_VERY_HIGH_VALUE
}
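# Illustrative sketch (hypothetical helper, not part of this module): the
# dictionary above maps a complexity choice to the maximum number of skeleton
# segments a cluster may contain before it is rejected; C_CUSTOM has no entry
# because its limit comes from the "Custom complexity" setting instead.
#
#     def segment_cap(choice, custom_value):
#         return complexity_limits.get(choice, custom_value)
#
#     segment_cap(C_HIGH, 400)    # -> 600
#     segment_cap(C_CUSTOM, 400)  # -> 400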
class UntangleWorms(cpm.CPModule):
variable_revision_number = 2
category = ["Object Processing","Worm Toolbox"]
module_name = "UntangleWorms"
def create_settings(self):
'''Create the settings that parameterize the module'''
self.mode = cps.Choice(
"Train or untangle worms?", [MODE_UNTANGLE, MODE_TRAIN],doc = """
<b>UntangleWorms</b> has two modes:
<ul>
<li><i>%(MODE_TRAIN)s</i> creates one training set per image group,
using all of the worms in the training set as examples. It then writes
the training file at the end of each image group.</li>
<li><i>%(MODE_UNTANGLE)s</i> uses the training file to untangle images of worms.</li>
</ul>
%(USING_METADATA_GROUPING_HELP_REF)s""" % globals())
self.image_name = cps.ImageNameSubscriber(
"Select the input binary image", cps.NONE,doc = """
A binary image where the foreground indicates the worm
shapes. The binary image can be produced by the <b>ApplyThreshold</b>
module.""")
self.overlap = cps.Choice(
"Overlap style", [OO_BOTH, OO_WITH_OVERLAP, OO_WITHOUT_OVERLAP],doc = """
This setting determines which style objects are output.
If two worms overlap, you have a choice of including the overlapping
regions in both worms or excluding the overlapping regions from
both worms.
<ul>
<li>Choose <i>%(OO_WITH_OVERLAP)s</i> to save objects including
overlapping regions.</li>
<li>Choose <i>%(OO_WITHOUT_OVERLAP)s</i> to save only
the portions of objects that do not overlap.</li>
<li>Choose <i>%(OO_BOTH)s</i> to save two versions: with and without overlap.</li>
</ul>""" %
globals())
self.overlap_objects = cps.ObjectNameProvider(
"Name the output overlapping worm objects", "OverlappingWorms",
provided_attributes = { ATTR_WORM_MEASUREMENTS:True },doc = """
<i>(Used only if "%(MODE_UNTANGLE)s" mode and "%(OO_BOTH)s" or "%(OO_WITH_OVERLAP)s" overlap style are selected)</i> <br>
This setting names the objects representing the overlapping
worms. When worms cross, they overlap and pixels are shared by
both of the overlapping worms. The overlapping worm objects share
these pixels and measurements of both overlapping worms will include
these pixels in the measurements of both worms."""%globals())
self.wants_overlapping_outlines = cps.Binary(
"Retain outlines of the overlapping objects?", False, doc = """
<i>(Used only if "%(MODE_UNTANGLE)s" mode and "%(OO_BOTH)s" or "%(OO_WITH_OVERLAP)s" overlap style are selected)</i> <br>
%(RETAINING_OUTLINES_HELP)s"""%globals())
self.overlapping_outlines_colormap = cps.Colormap(
"Outline colormap?",doc = """
<i>(Used only if "%(MODE_UNTANGLE)s" mode, "%(OO_BOTH)s" or "%(OO_WITH_OVERLAP)s" overlap style and retaining outlines are selected )</i> <br>
This setting controls the colormap used when drawing
outlines. The outlines are drawn in color to highlight the
shapes of each worm in a group of overlapping worms"""%globals())
self.overlapping_outlines_name = cps.OutlineNameProvider(
"Name the overlapped outline image",
"OverlappedWormOutlines",doc = """
<i>(Used only if "%(MODE_UNTANGLE)s" mode and "%(OO_BOTH)s" or "%(OO_WITH_OVERLAP)s" overlap style are selected)</i> <br>
This is the name of the outlines of the overlapped worms."""%globals())
self.nonoverlapping_objects = cps.ObjectNameProvider(
"Name the output non-overlapping worm objects", "NonOverlappingWorms",
provided_attributes = { ATTR_WORM_MEASUREMENTS:True },doc = """
<i>(Used only if "%(MODE_UNTANGLE)s" mode and "%(OO_BOTH)s" or "%(OO_WITHOUT_OVERLAP)s" overlap style are selected)</i> <br>
This setting names the objects representing the worms,
excluding those regions where the worms overlap. When worms cross,
there are pixels that cannot be unambiguously assigned to one
worm or the other. These pixels are excluded from both worms
in the non-overlapping objects and will not be a part of the
measurements of either worm."""%globals())
self.wants_nonoverlapping_outlines = cps.Binary(
"Retain outlines of the non-overlapping worms?", False,
"""<i>(Used only if "%(MODE_UNTANGLE)s" mode and "%(OO_BOTH)s" or "%(OO_WITH_OVERLAP)s" overlap style are selected)</i> <br>
%(RETAINING_OUTLINES_HELP)s"""%globals())
self.nonoverlapping_outlines_name =cps.OutlineNameProvider(
"Name the non-overlapped outlines image",
"NonoverlappedWormOutlines",doc = """
<i>(Used only if "%(MODE_UNTANGLE)s" mode and "%(OO_BOTH)s" or "%(OO_WITHOUT_OVERLAP)s" overlap style are selected)</i> <br>
This is the name of the outlines of the worms
with the overlapping sections removed."""%globals())
self.training_set_directory = cps.DirectoryPath(
"Training set file location",
support_urls = True,
allow_metadata = False,doc = """
Select the folder containing the training set to be loaded.
%(IO_FOLDER_CHOICE_HELP_TEXT)s
<p>An additional option is the following:
<ul>
<li><i>URL</i>: Use the path part of a URL. For instance, your
training set might be hosted at
<code>http://my_institution.edu/server/my_username/TrainingSet.xml</code>
To access this file, you would choose <i>URL</i> and enter
<code>http://my_institution.edu/server/my_username/</code>
as the path location.</li>
</ul></p>"""%globals())
self.training_set_directory.dir_choice = DEFAULT_OUTPUT_FOLDER_NAME
def get_directory_fn():
'''Get the directory for the CSV file name'''
return self.training_set_directory.get_absolute_path()
def set_directory_fn(path):
dir_choice, custom_path = self.training_set_directory.get_parts_from_path(path)
self.training_set_directory.join_parts(dir_choice, custom_path)
self.training_set_file_name = cps.FilenameText(
"Training set file name", "TrainingSet.xml",
doc = "This is the name of the training set file.",
get_directory_fn = get_directory_fn,
set_directory_fn = set_directory_fn,
browse_msg = "Choose training set",
exts = [("Worm training set (*.xml)", "*.xml"),
("All files (*.*)", "*.*")])
self.wants_training_set_weights = cps.Binary(
"Use training set weights?", True, doc = """
Select <i>%(YES)s</i> to use the overlap and leftover
weights from the training set.
<p>Select <i>%(NO)s</i> to override
these weights with user-specified values.</p>"""%globals())
self.override_overlap_weight = cps.Float(
"Overlap weight", 5, 0, doc = """
<i>(Used only if not using training set weights)</i> <br>
This setting controls how much weight is given to overlaps
between worms. <b>UntangleWorms</b> charges a penalty to a
particular putative grouping of worms that overlap equal to the
length of the overlapping region times the overlap weight.
<ul>
<li>Increase
the overlap weight to make <b>UntangleWorms</b> avoid overlapping
portions of worms.</li>
<li>Decrease the overlap weight to make
<b>UntangleWorms</b> ignore overlapping portions of worms.</li>
</ul>""")
self.override_leftover_weight = cps.Float(
"Leftover weight", 10, 0, doc = """
<i>(Used only if not using training set weights)</i> <br>
This setting controls how much weight is given to
areas not covered by worms.
<b>UntangleWorms</b> charges a penalty to a
particular putative grouping of worms that fail to cover all
of the foreground of a binary image. The penalty is equal to the
length of the uncovered region times the leftover weight.
<ul>
<li> Increase the leftover weight to make <b>UntangleWorms</b>
cover more foreground with worms.</li>
<li>Decrease the overlap weight to make <b>UntangleWorms</b>
ignore uncovered foreground.</li>
</ul>""")
self.min_area_percentile = cps.Float(
"Minimum area percentile", 1, 0, 100, doc="""
<i>(Used only if "%(MODE_TRAIN)s" mode is selected)</i> <br>
<b>UntangleWorms</b> will discard single worms whose area
is less than a certain minimum. It ranks all worms in the training
set according to area and then picks the worm at this percentile.
It then computes the minimum area allowed as this worm's area
times the minimum area factor."""%globals())
self.min_area_factor = cps.Float(
"Minimum area factor", .85, 0, doc = """
<i>(Used only if "%(MODE_TRAIN)s" mode is selected)</i> <br>
This setting is a multiplier that is applied to the
area of the worm, selected as described in the documentation
for <i>Minimum area percentile</i>."""%globals())
self.max_area_percentile = cps.Float(
"Maximum area percentile", 90, 0, 100,doc = """
<i>(Used only if "%(MODE_TRAIN)s" mode is selected)</i><br>
<b>UntangleWorms</b> uses a maximum area to distinguish
between single worms and clumps of worms. Any blob whose area is
less than the maximum area is considered to be a single worm
whereas any blob whose area is greater is considered to be two
or more worms. <b>UntangleWorms</b> orders all worms in the
training set by area and picks the worm at the percentile
given by this setting. It then multiplies this worm's area
by the <i>Maximum area factor</i> (see below) to get the maximum
area"""%globals())
self.max_area_factor = cps.Float(
"Maximum area factor", 1.0, 0, doc = """
<i>(Used only if "%(MODE_TRAIN)s" mode is selected)</i> <br>
The <i>Maximum area factor</i> setting is used to
compute the maximum area as described above in <i>Maximum area
percentile</i>."""%globals())
self.min_length_percentile = cps.Float(
"Minimum length percentile", 1, 0, 100,doc = """
<i>(Used only if "%(MODE_TRAIN)s" mode is selected)</i> <br>
<b>UntangleWorms</b> uses the minimum length to restrict its
search for worms in a clump to worms of at least the minimum length.
<b>UntangleWorms</b> sorts all worms by length and picks the worm
at the percentile indicated by this setting. It then multiplies the
length of this worm by the <i>Minimum length factor</i> (see below)
to get the minimum length."""%globals())
self.min_length_factor = cps.Float(
"Minimum length factor", 0.9, 0,doc = """
<i>(Used only if "%(MODE_TRAIN)s" mode is selected)</i> <br>
<b>UntangleWorms</b> uses the <i>Minimum length factor</i>
to compute the minimum length from the training set as described
in the documentation above for <i>Minimum length percentile</i>"""%globals())
self.max_length_percentile = cps.Float(
"Maximum length percentile", 99, 0, 100,doc = """
<i>(Used only if "%(MODE_TRAIN)s" mode is selected)</i> <br>
<b>UntangleWorms</b> uses the maximum length to restrict
its search for worms in a clump to worms of at least the maximum
length. It computes this length by sorting all of the training
worms by length. It then selects the worm at the <i>Maximum
length percentile</i> and multiplies that worm's length by
the <i>Maximum length factor</i> to get the maximum length"""%globals())
self.max_length_factor = cps.Float(
"Maximum length factor", 1.1, 0,doc = """
<i>(Used only if "%(MODE_TRAIN)s" mode is selected)</i> <br>
<b>UntangleWorms</b> uses this setting to compute the
maximum length as described in <i>Maximum length percentile</i>
above"""%globals())
self.max_cost_percentile = cps.Float(
"Maximum cost percentile", 90, 0, 100,doc = """
<i>(Used only if "%(MODE_TRAIN)s" mode is selected)</i><br>
<b>UntangleWorms</b> computes a shape-based cost for
each worm it considers. It will restrict the allowed cost to
less than the cost threshold. During training, <b>UntangleWorms</b>
computes the shape cost of every worm in the training set. It
then orders them by cost and uses <i>Maximum cost percentile</i>
to pick the worm at the given percentile. It then multiplies
this worm's cost by the <i>Maximum cost factor</i> to compute
the cost threshold."""%globals())
self.max_cost_factor = cps.Float(
"Maximum cost factor", 1.9, 0,doc = """
<i>(Used only if "%(MODE_TRAIN)s" mode is selected)</i> <br>
<b>UntangleWorms</b> uses this setting to compute the
cost threshold as described in <i>Maximum cost percentile</i>
above."""%globals())
self.num_control_points = cps.Integer(
"Number of control points", 21, 3, 50,doc = """
<i>(Used only if "%(MODE_TRAIN)s" mode is selected)</i> <br>
This setting controls the number of control points that
will be sampled when constructing a worm shape from its skeleton."""%globals())
self.max_radius_percentile = cps.Float(
"Maximum radius percentile", 90, 0, 100,doc = """
<i>(Used only if "%(MODE_TRAIN)s" mode is selected)</i> <br>
<b>UntangleWorms</b> uses the maximum worm radius during
worm skeletonization. <b>UntangleWorms</b> sorts the radii of
worms in increasing size and selects the worm at this percentile.
It then multiplies this worm's radius by the <i>Maximum radius
factor</i> (see below) to compute the maximum radius."""%globals())
self.max_radius_factor = cps.Float(
"Maximum radius factor", 1, 0,doc="""
<i>(Used only if "%(MODE_TRAIN)s" mode is selected)</i> <br>
<b>UntangleWorms</b> uses this setting to compute the
maximum radius as described in <i>Maximum radius percentile</i>
above."""%globals())
self.complexity = cps.Choice(
"Maximum complexity",
[ C_MEDIUM, C_HIGH, C_VERY_HIGH, C_ALL, C_CUSTOM],
value = C_HIGH,doc = """
<i>(Used only if "%(MODE_UNTANGLE)s" mode is selected)</i><br>
This setting controls which clusters of worms are rejected as
being too time-consuming to process. <b>UntangleWorms</b> judges
complexity based on the number of segments in a cluster where
a segment is the piece of a worm between crossing points or
from the head or tail to the first or last crossing point.
The choices are:<br>
<ul><li><i>%(C_MEDIUM)s</i>: %(C_MEDIUM_VALUE)d segments
(takes up to several minutes to process)</li>
<li><i>%(C_HIGH)s</i>: %(C_HIGH_VALUE)d segments
(takes up to a quarter-hour to process)</li>
<li><i>%(C_VERY_HIGH)s</i>: %(C_VERY_HIGH_VALUE)d segments
(can take hours to process)</li>
<li><i>%(C_CUSTOM)s</i>: allows you to enter a custom number of
segments.</li>
<li><i>%(C_ALL)s</i>: Process all worms, regardless of complexity</li>
</ul>""" % globals())
self.custom_complexity = cps.Integer(
"Custom complexity", 400, 20,doc = """
<i>(Used only if "%(MODE_UNTANGLE)s" mode and "%(C_CUSTOM)s" complexity are selected )</i>
Enter the maximum number of segments of any cluster that should
be processed."""%globals())
def settings(self):
return [self.image_name, self.overlap, self.overlap_objects,
self.nonoverlapping_objects, self.training_set_directory,
self.training_set_file_name, self.wants_training_set_weights,
self.override_overlap_weight, self.override_leftover_weight,
self.wants_overlapping_outlines,
self.overlapping_outlines_colormap,
self.overlapping_outlines_name,
self.wants_nonoverlapping_outlines,
self.nonoverlapping_outlines_name,
self.mode, self.min_area_percentile, self.min_area_factor,
self.max_area_percentile, self.max_area_factor,
self.min_length_percentile, self.min_length_factor,
self.max_length_percentile, self.max_length_factor,
self.max_cost_percentile, self.max_cost_factor,
self.num_control_points, self.max_radius_percentile,
self.max_radius_factor,
self.complexity, self.custom_complexity]
def help_settings(self):
return [self.mode, self.image_name, self.overlap, self.overlap_objects,
self.nonoverlapping_objects,
self.complexity, self.custom_complexity,
self.training_set_directory,
self.training_set_file_name, self.wants_training_set_weights,
self.override_overlap_weight, self.override_leftover_weight,
self.wants_overlapping_outlines,
self.overlapping_outlines_colormap,
self.overlapping_outlines_name,
self.wants_nonoverlapping_outlines,
self.nonoverlapping_outlines_name,
self.min_area_percentile, self.min_area_factor,
self.max_area_percentile, self.max_area_factor,
self.min_length_percentile, self.min_length_factor,
self.max_length_percentile, self.max_length_factor,
self.max_cost_percentile, self.max_cost_factor,
self.num_control_points, self.max_radius_percentile,
self.max_radius_factor]
def visible_settings(self):
result = [self.mode, self.image_name]
if self.mode == MODE_UNTANGLE:
result += [self.overlap]
if self.overlap in (OO_WITH_OVERLAP, OO_BOTH):
result += [self.overlap_objects, self.wants_overlapping_outlines]
if self.wants_overlapping_outlines:
result += [self.overlapping_outlines_colormap,
self.overlapping_outlines_name]
if self.overlap in (OO_WITHOUT_OVERLAP, OO_BOTH):
result += [self.nonoverlapping_objects,
self.wants_nonoverlapping_outlines]
if self.wants_nonoverlapping_outlines:
result += [self.nonoverlapping_outlines_name]
result += [self.complexity]
if self.complexity == C_CUSTOM:
result += [self.custom_complexity]
result += [self.training_set_directory, self.training_set_file_name,
self.wants_training_set_weights]
if not self.wants_training_set_weights:
result += [self.override_overlap_weight,
self.override_leftover_weight]
if self.mode == MODE_TRAIN:
result += [
self.min_area_percentile, self.min_area_factor,
self.max_area_percentile, self.max_area_factor,
self.min_length_percentile, self.min_length_factor,
self.max_length_percentile, self.max_length_factor,
self.max_cost_percentile, self.max_cost_factor,
self.num_control_points, self.max_radius_percentile,
self.max_radius_factor]
return result
def overlap_weight(self, params):
'''The overlap weight to use in the cost calculation'''
if not self.wants_training_set_weights:
return self.override_overlap_weight.value
elif params is None:
return 2
else:
return params.overlap_weight
def leftover_weight(self, params):
'''The leftover weight to use in the cost calculation'''
if not self.wants_training_set_weights:
return self.override_leftover_weight.value
elif params is None:
return 10
else:
return params.leftover_weight
def ncontrol_points(self):
'''# of control points when making a training set'''
if self.mode == MODE_UNTANGLE:
params = self.read_params()
return params.num_control_points
if not self.wants_training_set_weights:
return 21
else:
return self.num_control_points.value
@property
def max_complexity(self):
if self.complexity != C_CUSTOM:
return complexity_limits[self.complexity.value]
return self.custom_complexity.value
def prepare_group(self, workspace, grouping, image_numbers):
'''Prepare to process a group of worms'''
d = self.get_dictionary(workspace.image_set_list)
d[TRAINING_DATA] = []
def get_dictionary_for_worker(self):
'''Don't share the training data dictionary between workers'''
return { TRAINING_DATA:[] }
def run(self, workspace):
'''Run the module on the current image set'''
if self.mode == MODE_TRAIN:
self.run_train(workspace)
else:
self.run_untangle(workspace)
class TrainingData(object):
'''One worm's training data'''
def __init__(self, area, skel_length, angles, radial_profile):
self.area = area
self.skel_length = skel_length
self.angles = angles
self.radial_profile = radial_profile
def run_train(self, workspace):
'''Train based on the current image set'''
image_name = self.image_name.value
image_set = workspace.image_set
image = image_set.get_image(image_name,
must_be_binary = True)
num_control_points = self.ncontrol_points()
labels, count = scind.label(image.pixel_data, morph.eight_connect)
skeleton = morph.skeletonize(image.pixel_data)
distances = scind.distance_transform_edt(image.pixel_data)
worms = self.get_dictionary(workspace.image_set_list)[TRAINING_DATA]
areas = np.bincount(labels.ravel())
if self.show_window:
dworms = workspace.display_data.worms = []
workspace.display_data.input_image = image.pixel_data
for i in range(1, count+1):
mask = labels == i
graph = self.get_graph_from_binary(
image.pixel_data & mask, skeleton & mask)
path_coords, path = self.get_longest_path_coords(
graph, np.iinfo(int).max)
if len(path_coords) == 0:
continue
cumul_lengths = self.calculate_cumulative_lengths(path_coords)
if cumul_lengths[-1] == 0:
continue
control_points = self.sample_control_points(path_coords, cumul_lengths,
num_control_points)
angles = self.get_angles(control_points)
#
# Interpolate in 2-d when looking up the distances
#
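# The four weighted terms below perform standard bilinear interpolation:
# each control point lies at a fractional (i, j) position, so its radius is
# read from the distance transform at the four surrounding integer pixels,
# weighted by (1-fi)(1-fj), fi(1-fj), (1-fi)fj and fi*fj respectively.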
fi, fj = (control_points - np.floor(control_points)).transpose()
ci, cj = control_points.astype(int).transpose()
ci1 = np.minimum(ci+1, labels.shape[0]-1)
cj1 = np.minimum(cj+1, labels.shape[1]-1)
radial_profile = np.zeros(num_control_points)
for ii, jj, f in ((ci, cj, (1 - fi) * (1 - fj)),
(ci1, cj, fi * (1-fj)),
(ci, cj1, (1 - fi) * fj),
(ci1, cj1, fi * fj)):
radial_profile += distances[ii, jj] * f
worms.append(self.TrainingData(areas[i], cumul_lengths[-1],
angles, radial_profile))
if self.show_window:
dworms.append(control_points)
def is_aggregation_module(self):
'''Building the model requires aggregation across image sets'''
return self.mode == MODE_TRAIN
def post_group(self, workspace, grouping):
'''Write the training data file as we finish grouping.'''
if self.mode == MODE_TRAIN:
from cellprofiler.utilities.version import version_number
worms = self.get_dictionary(workspace.image_set_list)[TRAINING_DATA]
#
# Either get weights from our instance or instantiate
# the default UntangleWorms to get the defaults
#
if self.wants_training_set_weights:
this = self
else:
this = UntangleWorms()
nworms = len(worms)
num_control_points = self.ncontrol_points()
areas = np.zeros(nworms)
lengths = np.zeros(nworms)
radial_profiles = np.zeros((num_control_points, nworms))
angles = np.zeros((num_control_points-2, nworms))
for i, training_data in enumerate(worms):
areas[i] = training_data.area
lengths[i] = training_data.skel_length
angles[:,i] = training_data.angles
radial_profiles[:,i] = training_data.radial_profile
areas.sort()
lengths.sort()
min_area = this.min_area_factor.value * mlab.prctile(
areas, this.min_area_percentile.value)
max_area = this.max_area_factor.value * mlab.prctile(
areas, this.max_area_percentile.value)
median_area = np.median(areas)
min_length = this.min_length_factor.value * mlab.prctile(
lengths, this.min_length_percentile.value)
max_length = this.max_length_factor.value * mlab.prctile(
lengths, this.max_length_percentile.value)
max_skel_length = mlab.prctile(lengths, this.max_length_percentile.value)
max_radius = this.max_radius_factor.value * mlab.prctile(
radial_profiles.flatten(), this.max_radius_percentile.value)
mean_radial_profile = np.mean(radial_profiles, 1)
#
# Mirror the angles by negating them. Flip heads and tails
# because they are arbitrary.
#
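# Each training worm therefore contributes four feature vectors: the original
# angles, their negation (mirror reflection), and both of those with the
# control-point order reversed (head/tail swap), so the learned shape model
# is symmetric under both operations.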
angles = np.hstack((
angles,
-angles,
angles[::-1,:],
-angles[::-1,:]))
lengths = np.hstack([lengths]*4)
feat_vectors = np.vstack((angles, lengths[np.newaxis,:]))
mean_angles_length = np.mean(feat_vectors, 1)
fv_adjusted = feat_vectors - mean_angles_length[:, np.newaxis]
angles_covariance_matrix = np.cov(fv_adjusted)
inv_angles_covariance_matrix = np.linalg.inv(angles_covariance_matrix)
angle_costs = [np.dot(np.dot(fv, inv_angles_covariance_matrix), fv)
for fv in fv_adjusted.transpose()]
max_cost = this.max_cost_factor.value * mlab.prctile(
angle_costs, this.max_cost_percentile.value)
#
# Write it to disk
#
if workspace.pipeline.test_mode:
return
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
path = self.training_set_directory.get_absolute_path(m)
file_name = m.apply_metadata(self.training_set_file_name.value)
fd = open(os.path.join(path, file_name), "w")
doc = DOM.getDOMImplementation().createDocument(
T_NAMESPACE, T_TRAINING_DATA, None)
top = doc.documentElement
top.setAttribute("xmlns", T_NAMESPACE)
for tag, value in (
(T_VERSION, version_number),
(T_MIN_AREA, min_area),
(T_MAX_AREA, max_area),
(T_COST_THRESHOLD, max_cost),
(T_NUM_CONTROL_POINTS, num_control_points),
(T_MAX_SKEL_LENGTH, max_skel_length),
(T_MIN_PATH_LENGTH, min_length),
(T_MAX_PATH_LENGTH, max_length),
(T_MEDIAN_WORM_AREA, median_area),
(T_MAX_RADIUS, max_radius),
(T_OVERLAP_WEIGHT, this.override_overlap_weight.value),
(T_LEFTOVER_WEIGHT, this.override_leftover_weight.value),
(T_TRAINING_SET_SIZE, nworms)):
element = doc.createElement(tag)
content = doc.createTextNode(str(value))
element.appendChild(content)
top.appendChild(element)
for tag, values in ((T_MEAN_ANGLES, mean_angles_length),
(T_RADII_FROM_TRAINING, mean_radial_profile)):
element = doc.createElement(tag)
top.appendChild(element)
for value in values:
value_element = doc.createElement(T_VALUE)
content = doc.createTextNode(str(value))
value_element.appendChild(content)
element.appendChild(value_element)
element = doc.createElement(T_INV_ANGLES_COVARIANCE_MATRIX)
top.appendChild(element)
for row in inv_angles_covariance_matrix:
values = doc.createElement(T_VALUES)
element.appendChild(values)
for col in row:
value = doc.createElement(T_VALUE)
content = doc.createTextNode(str(col))
value.appendChild(content)
values.appendChild(value)
doc.writexml(fd, addindent=" ", newl="\n")
fd.close()
if self.show_window:
workspace.display_data.angle_costs = angle_costs
workspace.display_data.feat_vectors = feat_vectors
workspace.display_data.angles_covariance_matrix = \
angles_covariance_matrix
def run_untangle(self, workspace):
'''Untangle based on the current image set'''
params = self.read_params()
image_name = self.image_name.value
image_set = workspace.image_set
image = image_set.get_image(image_name,
must_be_binary = True)
labels, count = scind.label(image.pixel_data, morph.eight_connect)
#
# Skeletonize once, then remove any points in the skeleton
# that are adjacent to the edge of the image, then skeletonize again.
#
# This gets rid of artifacts that cause combinatoric explosions:
#
# * * * * * * * *
# * * *
# * * * * * * * *
#
skeleton = morph.skeletonize(image.pixel_data)
eroded = scind.binary_erosion(image.pixel_data, morph.eight_connect)
skeleton = morph.skeletonize(skeleton & eroded)
#
# The path skeletons
#
all_path_coords = []
if count != 0 and np.sum(skeleton) != 0:
areas = np.bincount(labels.flatten())
skeleton_areas = np.bincount(labels[skeleton])
current_index = 1
for i in range(1,count+1):
if (areas[i] < params.min_worm_area or
i >= skeleton_areas.shape[0] or
skeleton_areas[i] == 0):
# Completely exclude the worm
continue
elif areas[i] <= params.max_area:
path_coords, path_struct = self.single_worm_find_path(
workspace, labels, i, skeleton, params)
if len(path_coords) > 0 and self.single_worm_filter(
workspace, path_coords, params):
all_path_coords.append(path_coords)
else:
graph = self.cluster_graph_building(
workspace, labels, i, skeleton, params)
if len(graph.segments) > self.max_complexity:
logger.warning(
"Warning: rejecting cluster of %d segments.\n" %
len(graph.segments))
continue
paths = self.get_all_paths(
graph, params.min_path_length, params.max_path_length)
paths_selected = self.cluster_paths_selection(
graph, paths, labels, i, params)
del graph
del paths
all_path_coords += paths_selected
ijv, all_lengths, all_angles, all_control_coords_x, all_control_coords_y = \
self.worm_descriptor_building(all_path_coords, params,
labels.shape)
if self.show_window:
workspace.display_data.input_image = image.pixel_data
object_set = workspace.object_set
assert isinstance(object_set, cpo.ObjectSet)
measurements = workspace.measurements
assert isinstance(measurements, cpmeas.Measurements)
object_names = []
if self.overlap in (OO_WITH_OVERLAP, OO_BOTH):
o = cpo.Objects()
o.ijv = ijv
o.parent_image = image
name = self.overlap_objects.value
object_names.append(name)
object_set.add_objects(o, name)
I.add_object_count_measurements(measurements, name, o.count)
if self.show_window:
workspace.display_data.overlapping_labels = [
l for l, idx in o.get_labels()]
if o.count == 0:
center_x = np.zeros(0)
center_y = np.zeros(0)
else:
center_x = np.bincount(ijv[:, 2], ijv[:, 1])[o.indices] / o.areas
center_y = np.bincount(ijv[:, 2], ijv[:, 0])[o.indices] / o.areas
measurements.add_measurement(name, I.M_LOCATION_CENTER_X, center_x)
measurements.add_measurement(name, I.M_LOCATION_CENTER_Y, center_y)
measurements.add_measurement(name, I.M_NUMBER_OBJECT_NUMBER, o.indices)
#
# Save outlines
#
if self.wants_overlapping_outlines:
from matplotlib.cm import ScalarMappable
colormap = self.overlapping_outlines_colormap.value
if colormap == cps.DEFAULT:
colormap = cpprefs.get_default_colormap()
if len(ijv) == 0:
ishape = image.pixel_data.shape
outline_pixels = np.zeros((ishape[0],ishape[1], 3))
else:
my_map = ScalarMappable(cmap = colormap)
colors = my_map.to_rgba(np.unique(ijv[:,2]))
outline_pixels = o.make_ijv_outlines(colors[:,:3])
outline_image = cpi.Image(outline_pixels, parent_image = image)
image_set.add(self.overlapping_outlines_name.value,
outline_image)
if self.overlap in (OO_WITHOUT_OVERLAP, OO_BOTH):
#
# Sum up the number of overlaps using a sparse matrix
#
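# overlap_hits counts how many worms claim each foreground pixel. Pixels
# claimed by exactly one worm keep that worm's label; pixels shared by two
# or more worms are zeroed out below, yielding the non-overlapping labels.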
overlap_hits = coo.coo_matrix(
(np.ones(len(ijv)), (ijv[:,0], ijv[:,1])),
image.pixel_data.shape)
overlap_hits = overlap_hits.toarray()
mask = overlap_hits == 1
labels = coo.coo_matrix((ijv[:,2],(ijv[:,0], ijv[:,1])), mask.shape)
labels = labels.toarray()
labels[~ mask] = 0
o = cpo.Objects()
o.segmented = labels
o.parent_image = image
name = self.nonoverlapping_objects.value
object_names.append(name)
object_set.add_objects(o, name)
I.add_object_count_measurements(measurements, name, o.count)
I.add_object_location_measurements(measurements, name, labels, o.count)
if self.show_window:
workspace.display_data.nonoverlapping_labels = [
l for l, idx in o.get_labels()]
if self.wants_nonoverlapping_outlines:
outline_pixels = outline(labels) > 0
outline_image = cpi.Image(outline_pixels, parent_image = image)
image_set.add(self.nonoverlapping_outlines_name.value,
outline_image)
for name in object_names:
measurements.add_measurement(name, "_".join((C_WORM, F_LENGTH)),
all_lengths)
for values, ftr in ((all_angles, F_ANGLE),
(all_control_coords_x, F_CONTROL_POINT_X),
(all_control_coords_y, F_CONTROL_POINT_Y)):
for i in range(values.shape[1]):
feature = "_".join((C_WORM, ftr, str(i+1)))
measurements.add_measurement(name, feature, values[:, i])
def display(self, workspace, figure):
from cellprofiler.gui.cpfigure import CPLDM_ALPHA
if self.mode == MODE_UNTANGLE:
figure.set_subplots((1, 1))
cplabels = []
if self.overlap in (OO_BOTH, OO_WITH_OVERLAP):
title = self.overlap_objects.value
cplabels.append(
dict(name = self.overlap_objects.value,
labels = workspace.display_data.overlapping_labels,
mode = CPLDM_ALPHA))
else:
title = self.nonoverlapping_objects.value
if self.overlap in (OO_BOTH, OO_WITHOUT_OVERLAP):
cplabels.append(
dict(name = self.nonoverlapping_objects.value,
labels = workspace.display_data.nonoverlapping_labels))
image = workspace.display_data.input_image
if image.ndim == 2:
figure.subplot_imshow_grayscale(
0, 0, image, title = title, cplabels = cplabels)
else:
from matplotlib.path import Path
from matplotlib.patches import PathPatch
figure.set_subplots((1, 1))
figure.subplot_imshow_bw(0, 0, workspace.display_data.input_image,
title = self.image_name.value)
axes = figure.subplot(0,0)
for control_points in workspace.display_data.worms:
axes.plot(control_points[:,1],
control_points[:,0], "ro-",
markersize = 4)
def display_post_group(self, workspace, figure):
"""Display some statistical information about training, post-group
workspace - holds the display data used to create the display
figure - the module's figure.
"""
if self.mode == MODE_TRAIN:
from matplotlib.transforms import Bbox
angle_costs = workspace.display_data.angle_costs
feat_vectors = workspace.display_data.feat_vectors
angles_covariance_matrix = workspace.display_data.angles_covariance_matrix
figure = workspace.create_or_find_figure(
subplots = (4,1),
window_name = "UntangleWorms_PostGroup")
f = figure.figure
f.clf()
a = f.add_subplot(1,4,1)
a.set_position((Bbox([[.1, .1],[.15, .9]])))
a.boxplot(angle_costs)
a.set_title("Costs")
a = f.add_subplot(1,4,2)
a.set_position((Bbox([[.2, .1],[.25, .9]])))
a.boxplot(feat_vectors[-1,:])
a.set_title("Lengths")
a = f.add_subplot(1,4,3)
a.set_position((Bbox([[.30, .1],[.60, .9]])))
a.boxplot(feat_vectors[:-1,:].transpose() * 180 / np.pi)
a.set_title("Angles")
a = f.add_subplot(1,4,4)
a.set_position((Bbox([[.65, .1],[1, .45]])))
a.imshow(angles_covariance_matrix[:-1,:-1],
interpolation="nearest")
a.set_title("Covariance")
f.canvas.draw()
figure.Refresh()
def single_worm_find_path(self, workspace, labels, i, skeleton, params):
'''Finds the worm's skeleton as a path.
labels - the labels matrix, labeling single and clusters of worms
i - the labeling of the worm of interest
params - The parameter structure
returns:
path_coords: An N x 2 array of coordinates for the path found. (Each
point along the polyline path is represented by a row, with the
i coordinate in the first column and the j coordinate in the second.)
path_struct: a structure describing the path
'''
binary_im = labels == i
skeleton = skeleton & binary_im
graph_struct = self.get_graph_from_binary(binary_im, skeleton)
return self.get_longest_path_coords(
graph_struct, params.max_path_length)
def get_graph_from_binary(self, binary_im, skeleton, max_radius = None,
max_skel_length = None):
'''Manufacture a graph of the skeleton of the worm
Given a binary image containing a cluster of worms, returns a structure
describing the graph structure of the skeleton of the cluster. This graph
structure can later be used as input to e.g. get_all_paths().
Input parameters:
binary_im: A logical image, containing the cluster to be resolved. Must
contain exactly one connected component.
Output_parameters:
graph_struct: An object with attributes
image_size: Equal to size(binary_im).
segments: A list describing the segments of
the skeleton. Each element is an array of i,j coordinates
of the pixels making up one segment, traced in the right order.
branch_areas: A list describing the
branch areas, i.e. the areas where different segments join. Each
element is an array of i,j coordinates
of the pixels making up one branch area, in no particular order.
The branch areas include all branchpoints, followed by a dilation.
If max_radius is supplied, they also include all pixels remaining
after opening the binary image consisting of all pixels further
than max_radius from the image background. This allows skeleton pixels
in thick regions to be replaced by branchpoint regions, which increases
the chance of connecting skeleton pieces correctly.
incidence_matrix: A num_branch_areas x num_segments logical array,
describing the incidence relations among the branch areas and
segments. incidence_matrix(i, j) is set if and only if branch area
i connects to segment j.
incidence_directions: A num_branch_areas x num_segments logical
array, intended to indicate the directions in which the segments
are traced. incidence_directions(i,j) is set if and only if the
"start end" (as in the direction in which the pixels are enumerated
in graph_struct.segments) of segment j is connected to branch point
i.
Notes:
1. Because of a dilation step in obtaining them, the branch areas need
not be (in fact, are never, unless binary_im contains all pixels)
a subset of the foreground pixels of binary_im. However, they are a
subset of the ones(3,3)-dilation of binary_im.
2. The segments are not considered to actually enter the branch areas;
that is to say, the pixel set of the branch areas is disjoint from
that of the segments.
3. Even if one segment is only one pixel long (but still connects to
two branch areas), its orientation is well-defined, i.e. one branch
area will be chosen as starting end. (Even though in this case, the
"positive direction" of the segment cannot be determined from the
information in graph_struct.segments.)'''
branch_areas_binary = morph.branchpoints(skeleton)
if max_radius is not None:
#
# Add any points that are more than the worm diameter to
# the branchpoints. Exclude segments without supporting branchpoints:
#
# OK:
#
# * * * * * *
# * * *
# * * * * * *
#
# Not OK:
#
# * * * * * * * * * *
#
strel = morph.strel_disk(max_radius)
far = scind.binary_erosion(binary_im, strel)
far = scind.binary_opening(far, structure = morph.eight_connect)
far_labels, count = scind.label(far)
far_counts = np.bincount(far_labels.ravel(),
branch_areas_binary.ravel())
far[far_counts[far_labels] < 2] = False
branch_areas_binary |= far
del far
del far_labels
branch_areas_binary = scind.binary_dilation(
branch_areas_binary, structure = morph.eight_connect)
segments_binary = skeleton & ~ branch_areas_binary
if max_skel_length is not None and np.sum(segments_binary) > 0:
max_skel_length = max(int(max_skel_length),2) # paranoia
i, j, labels, order, distance, num_segments = \
self.trace_segments(segments_binary)
#
# Put breakpoints every max_skel_length, but not at end
#
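# Each segment whose length (maximum order) reaches max_skel_length is split
# into roughly equal pieces: segment_count is (approximately) the ceiling of
# the segment length divided by max_skel_length, and a new branch point is
# dropped every segment_length pixels, but never at the segment's final pixel.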
max_order = np.array(scind.maximum(order, labels,
np.arange(num_segments + 1)))
big_segment = max_order >= max_skel_length
segment_count = np.maximum((max_order + max_skel_length - 1) /
max_skel_length, 1).astype(int)
segment_length = ((max_order + 1) / segment_count).astype(int)
new_bp_mask = ((order % segment_length[labels] ==
segment_length[labels] - 1) &
(order != max_order[labels]) &
(big_segment[labels]))
new_branch_areas_binary = np.zeros(segments_binary.shape, bool)
new_branch_areas_binary[i[new_bp_mask], j[new_bp_mask]] = True
new_branch_areas_binary = scind.binary_dilation(
new_branch_areas_binary, structure = morph.eight_connect)
branch_areas_binary |= new_branch_areas_binary
segments_binary &= ~new_branch_areas_binary
return self.get_graph_from_branching_areas_and_segments(
branch_areas_binary, segments_binary)
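# Illustrative sketch (hypothetical input, not from the original code): for a
# "+"-shaped skeleton whose arms are long enough to survive the dilation, the
# central branchpoint becomes the single branch area and the four arms become
# four segments, so the resulting incidence_matrix has shape (1, 4) with every
# entry True.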
def trace_segments(self, segments_binary):
'''Find distance of every point in a segment from a segment endpoint
segments_binary - a binary mask of the segments in an image.
returns a tuple of the following:
i - the i coordinate of a point in the mask
j - the j coordinate of a point in the mask
label - the segment's label
order - the ordering (from 0 to N-1 where N is the # of points in
the segment.)
distance - the propagation distance of the point from the endpoint
num_segments - the # of labelled segments
'''
#
# Break long skeletons into pieces whose maximum length
# is max_skel_length.
#
segments_labeled, num_segments = scind.label(
segments_binary, structure = morph.eight_connect)
if num_segments == 0:
return (np.array([], int), np.array([], int), np.array([], int),
np.array([], int), np.array([]), 0)
#
# Get one endpoint per segment
#
endpoints = morph.endpoints(segments_binary)
#
# Use a consistent order: pick with lowest i, then j.
# If a segment loops upon itself, we pick an arbitrary point.
#
order = np.arange(np.prod(segments_binary.shape))
order.shape = segments_binary.shape
order[~ endpoints] += np.prod(segments_binary.shape)
labelrange = np.arange(num_segments+1).astype(int)
endpoint_loc = scind.minimum_position(order, segments_labeled,
labelrange)
endpoint_loc = np.array(endpoint_loc, int)
endpoint_labels = np.zeros(segments_labeled.shape, np.int16)
endpoint_labels[endpoint_loc[:,0], endpoint_loc[:,1]] =\
segments_labeled[endpoint_loc[:,0], endpoint_loc[:,1]]
#
# A corner case - propagate will trace a loop around both ways. So
# we have to find that last point and remove it so
# it won't trace in that direction
#
loops = ~ endpoints[endpoint_loc[1:,0], endpoint_loc[1:,1]]
if np.any(loops):
# Consider all points around the endpoint, finding the one
# which is numbered last
dilated_ep_labels = morph.grey_dilation(
endpoint_labels, footprint = np.ones((3,3), bool))
dilated_ep_labels[dilated_ep_labels != segments_labeled] = 0
loop_endpoints = scind.maximum_position(
order, dilated_ep_labels.astype(int), labelrange[1:][loops])
loop_endpoints = np.array(loop_endpoints, int)
segments_binary_temp = segments_binary.copy()
segments_binary_temp[loop_endpoints[:,0], loop_endpoints[:,1]] = False
else:
segments_binary_temp = segments_binary
#
# Now propagate from the endpoints to get distances
#
_, distances = propagate(np.zeros(segments_binary.shape),
endpoint_labels,
segments_binary_temp, 1)
if np.any(loops):
# set the end-of-loop distances to be very large
distances[loop_endpoints[:,0], loop_endpoints[:,1]] = np.inf
#
# Order points by label # and distance
#
i, j = np.mgrid[0:segments_binary.shape[0],
0:segments_binary.shape[1]]
i = i[segments_binary]
j = j[segments_binary]
labels = segments_labeled[segments_binary]
distances = distances[segments_binary]
order = np.lexsort((distances, labels))
i = i[order]
j = j[order]
labels = labels[order]
distances = distances[order]
#
# Number each point in a segment consecutively. We determine
# where each label starts. Then we subtract the start index
# of each point's label from each point to get the order relative
# to the first index of the label.
#
segment_order = np.arange(len(i))
areas = np.bincount(labels.flatten())
indexes = np.cumsum(areas) - areas
segment_order -= indexes[labels]
return i, j, labels, segment_order, distances, num_segments
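# Illustrative sketch (hypothetical input): for a single horizontal 3-pixel
# segment at row 0, columns 0-2, the endpoint (0, 0) is chosen (lowest i,
# then j), so the returned values are roughly
#     i = [0, 0, 0], j = [0, 1, 2], labels = [1, 1, 1],
#     segment_order = [0, 1, 2], distances = [0, 1, 2], num_segments = 1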
def get_graph_from_branching_areas_and_segments(
self, branch_areas_binary, segments_binary):
'''Turn branches + segments into a graph
branch_areas_binary - binary mask of branch areas
segments_binary - binary mask of segments != branch_areas
Given two binary images, one containing "branch areas" one containing
"segments", returns a structure describing the incidence relations
between the branch areas and the segments.
Output is same format as get_graph_from_binary(), so for details, see
get_graph_from_binary
'''
branch_areas_labeled, num_branch_areas = scind.label(
branch_areas_binary, morph.eight_connect)
i, j, labels, order, distance, num_segments = self.trace_segments(
segments_binary)
ooo = np.lexsort((order, labels))
i = i[ooo]
j = j[ooo]
labels = labels[ooo]
order = order[ooo]
distance = distance[ooo]
counts = (np.zeros(0, int) if len(labels) == 0
else np.bincount(labels.flatten())[1:])
branch_ij = np.argwhere(branch_areas_binary)
if len(branch_ij) > 0:
ooo = np.lexsort([
branch_ij[:,0], branch_ij[:,1],
branch_areas_labeled[branch_ij[:,0], branch_ij[:,1]]])
branch_ij = branch_ij[ooo]
branch_labels = branch_areas_labeled[branch_ij[:,0], branch_ij[:,1]]
branch_counts = np.bincount(branch_areas_labeled.flatten())[1:]
else:
branch_labels = np.zeros(0, int)
branch_counts = np.zeros(0, int)
#
# "find" the segment starts
#
starts = order == 0
start_labels = np.zeros(segments_binary.shape, int)
start_labels[i[starts], j[starts]] = labels[starts]
#
# incidence_directions = True for starts
#
incidence_directions = self.make_incidence_matrix(
branch_areas_labeled, num_branch_areas, start_labels, num_segments)
#
# Get the incidence matrix for the ends
#
ends = np.cumsum(counts)-1
end_labels = np.zeros(segments_binary.shape, int)
end_labels[i[ends], j[ends]] = labels[ends]
incidence_matrix = self.make_incidence_matrix(
branch_areas_labeled, num_branch_areas, end_labels, num_segments)
incidence_matrix |= incidence_directions
class Result(object):
'''A result graph:
image_size: size of input image
segments: a list for each segment of a forward (index = 0) and
reverse N x 2 array of coordinates of pixels in a segment
segment_indexes: the index of label X into segments
segment_counts: # of points per segment
segment_order: for each pixel, its order when tracing
branch_areas: an N x 2 array of branch point coordinates
branch_area_indexes: index into the branch areas per branchpoint
branch_area_counts: # of points in each branch
incidence_matrix: matrix of areas x segments indicating connections
incidence_directions: direction of each connection
'''
def __init__(self, branch_areas_binary, counts, i,j,
branch_ij, branch_counts, incidence_matrix,
incidence_directions):
self.image_size = tuple(branch_areas_binary.shape)
self.segment_coords = np.column_stack((i,j))
self.segment_indexes = np.cumsum(counts) - counts
self.segment_counts = counts
self.segment_order = order
self.segments = [
(self.segment_coords[self.segment_indexes[i]:
(self.segment_indexes[i] +
self.segment_counts[i])],
self.segment_coords[self.segment_indexes[i]:
(self.segment_indexes[i] +
self.segment_counts[i])][::-1])
for i in range(len(counts))]
self.branch_areas = branch_ij
self.branch_area_indexes = np.cumsum(branch_counts) - branch_counts
self.branch_area_counts = branch_counts
self.incidence_matrix = incidence_matrix
self.incidence_directions = incidence_directions
return Result(branch_areas_binary, counts, i,j, branch_ij, branch_counts,
incidence_matrix, incidence_directions)
def make_incidence_matrix(self, L1, N1, L2, N2):
'''Return an N1 x N2 matrix that marks all L1 and L2 labels that are 8-connected
L1 - a labels matrix
N1 - # of labels in L1
L2 - a labels matrix
N2 - # of labels in L2
L1 and L2 should have no overlap
Returns a matrix where M[n,m] is true if there is some pixel in
L1 with value n that is 8-connected to a pixel in L2 with value m
'''
#
# Overlay the two labels matrix
#
L = L1.copy()
L[L2 != 0] = L2[L2 != 0] + N1
neighbor_count, neighbor_index, n2 = \
morph.find_neighbors(L)
if np.all(neighbor_count == 0):
return np.zeros((N1, N2), bool)
#
# Keep the neighbors of L1 / discard neighbors of L2
#
neighbor_count = neighbor_count[:N1]
neighbor_index = neighbor_index[:N1]
n2 = n2[:(neighbor_index[-1] + neighbor_count[-1])]
#
# Get rid of blanks
#
label = np.arange(N1)[neighbor_count > 0]
neighbor_index = neighbor_index[neighbor_count > 0]
neighbor_count = neighbor_count[neighbor_count > 0]
#
# Correct n2 because we previously added N1 to its labels. Make
# it zero-based.
#
n2 -= N1 + 1
#
# Create runs of n1 labels
#
n1 = np.zeros(len(n2), int)
n1[0] = label[0]
n1[neighbor_index[1:]] = label[1:] - label[:-1]
n1 = np.cumsum(n1)
incidence = coo.coo_matrix((np.ones(n1.shape), (n1,n2)),
shape = (N1, N2)).toarray()
return incidence != 0
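# Illustrative sketch (hypothetical input): if L1 holds a single label 1 at
# pixel (5, 5) and L2 holds a single label 1 at the 8-connected pixel (5, 6),
# then with N1 = N2 = 1 the returned matrix is [[True]]; moving the L2 pixel
# to (5, 7) would give [[False]] instead.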
def get_longest_path_coords(self, graph_struct, max_length):
'''Given a graph describing the structure of the skeleton of an image,
returns the longest non-self-intersecting (with some caveats, see
get_all_paths.m) path through that graph, specified as a polyline.
Inputs:
graph_struct: A structure describing the graph. Same format as returned
by get_graph_from_binary(), see that file for details.
Outputs:
path_coords: An n x 2 array, where successive rows contain the
coordinates of successive points on the path (which, when joined with
line segments, form the path itself.)
path_struct: A structure, with entries 'segments' and 'branch_areas',
describing the path found, in relation to graph_struct. See
get_all_paths.m for details.'''
path_list = self.get_all_paths(graph_struct, 0, max_length)
current_longest_path_coords = []
current_max_length = 0
current_path = None
for path in path_list:
path_coords = self.path_to_pixel_coords(graph_struct, path)
path_length = self.calculate_path_length(path_coords)
if path_length >= current_max_length:
current_longest_path_coords = path_coords
current_max_length = path_length
current_path = path
return current_longest_path_coords, current_path
def path_to_pixel_coords(self, graph_struct, path):
'''Given a structure describing paths in a graph, converts those to a
polyline (i.e. successive coordinates) representation of the same graph.
(This is possible because the graph_struct descriptor contains
information on where the vertices and edges of the graph were initially
located in the image plane.)
Inputs:
graph_struct: A structure describing the graph. Same format as returned
by get_graph_from_binary(), so for details, see that file.
path_struct: A structure which (in relation to graph_struct) describes
a path through the graph. Same format as (each entry in the list)
returned by get_all_paths(), so see further get_all_paths.m
Outputs:
pixel_coords: An n x 2 double array, where each row contains the
coordinates of one point on the path. The path itself can be formed
by joining these points successively to each other.
Note:
Because of the way the graph is built, the points in pixel_coords are
likely to contain segments consisting of runs of pixels where each is
close to the next (in its 8-neighbourhood), but interleaved with
reasonably long "jumps", where there is some distance between the end
of one segment and the beginning of the next.'''
if len(path.segments) == 1:
return graph_struct.segments[path.segments[0]][0]
direction = graph_struct.incidence_directions[path.branch_areas[0],
path.segments[0]]
result = [graph_struct.segments[path.segments[0]][direction]]
for branch_area, segment in zip(path.branch_areas, path.segments[1:]):
direction = not graph_struct.incidence_directions[branch_area,
segment]
result.append(graph_struct.segments[segment][direction])
return np.vstack(result)
def calculate_path_length(self, path_coords):
'''Return the path length, given path coordinates as Nx2'''
if len(path_coords) < 2:
return 0
return np.sum(np.sqrt(np.sum((path_coords[:-1]-path_coords[1:])**2,1)))
def calculate_cumulative_lengths(self, path_coords):
'''return a cumulative length vector given Nx2 path coordinates'''
if len(path_coords) < 2:
return np.array([0] * len(path_coords))
return np.hstack(([0],
np.cumsum(np.sqrt(np.sum((path_coords[:-1]-path_coords[1:])**2,1)))))
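# Illustrative sketch (hypothetical input): for path_coords
# [[0, 0], [0, 3], [4, 3]] the two segment lengths are 3 and 4, so
# calculate_path_length returns 7 and calculate_cumulative_lengths
# returns [0, 3, 7].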
def single_worm_filter(self, workspace, path_coords, params):
'''Given a path representing a single worm, calculates its shape cost, and
either accepts it as a worm or rejects it, depending on whether or not
the shape cost is higher than some threshold.
Inputs:
path_coords: A N x 2 array giving the coordinates of the path.
params: the parameters structure from which we use
cost_threshold: Scalar double. The maximum cost allowed for a worm;
paths of shape cost higher than this are rejected.
num_control_points. Scalar positive integer. The shape cost
model uses control points sampled at equal intervals along the
path.
mean_angles: A (num_control_points-1) x
1 double array. See calculate_angle_shape_cost() for how this is
used.
inv_angles_covariance_matrix: A
(num_control_points-1)x(num_control_points-1) double matrix. See
calculate_angle_shape_cost() for how this is used.
Returns true if worm passes filter'''
if len(path_coords) < 2:
return False
cumul_lengths = self.calculate_cumulative_lengths(path_coords)
total_length = cumul_lengths[-1]
control_coords = self.sample_control_points(
path_coords, cumul_lengths, params.num_control_points)
cost = self.calculate_angle_shape_cost(
control_coords, total_length, params.mean_angles,
params.inv_angles_covariance_matrix)
return cost < params.cost_threshold
def sample_control_points(self, path_coords, cumul_lengths, num_control_points):
'''Sample equally-spaced control points from the Nx2 path coordinates
Inputs:
path_coords: An Nx2 double array, where each row specifies a point
on the path (and the path itself is formed by joining successive
points with line segments). Such as returned by
path_struct_to_pixel_coords().
cumul_lengths: A vector, where the ith entry indicates the
length from the first point of the path to the ith point in path_coords.
In most cases, this should be calculate_cumulative_lengths(path_coords).
num_control_points: A positive integer. The number of control points to sample.
Outputs:
control_coords: An N x 2 double array, where the jth row contains the
jth control point, sampled along the path. The first and last control
points are equal to the first and last points of the path (i.e. the
points whose coordinates are the first and last rows of
path_coords), respectively.'''
assert num_control_points > 2
#
# Paranoia - eliminate any coordinates with length = 0, esp the last.
#
path_coords = path_coords.astype(float)
cumul_lengths = cumul_lengths.astype(float)
mask = np.hstack(([True], cumul_lengths[1:] != cumul_lengths[:-1]))
path_coords = path_coords[mask]
#
# Create a function that maps control point index to distance
#
ncoords = len(path_coords)
f = interp1d(cumul_lengths, np.linspace(0.0, float(ncoords-1), ncoords))
#
# Sample points from f (for the ones in the middle)
#
first = float(cumul_lengths[-1]) / float(num_control_points-1)
last = float(cumul_lengths[-1]) - first
findices = f(np.linspace(first, last, num_control_points-2))
indices = findices.astype(int)
assert indices[-1] < ncoords-1
fracs = findices - indices
sampled = (path_coords[indices,:] * (1-fracs[:,np.newaxis]) +
path_coords[(indices+1),:] * fracs[:,np.newaxis])
#
# Tack on first and last
#
sampled = np.vstack((path_coords[:1,:], sampled, path_coords[-1:,:]))
return sampled
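# Illustrative sketch (hypothetical input): for a straight path of 11 pixels
# along row 0 (path_coords[k] == [0, k], cumulative lengths 0..10) and
# num_control_points = 5, the samples land at arc lengths 0, 2.5, 5, 7.5 and
# 10, i.e. approximately [[0, 0], [0, 2.5], [0, 5], [0, 7.5], [0, 10]].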
def calculate_angle_shape_cost(self, control_coords, total_length,
mean_angles, inv_angles_covariance_matrix):
'''Calculates a shape cost based on the angle shape cost model.
Given a set of N control points, calculates the N-2 angles between
lines joining consecutive control points, forming them into a vector.
The function then appends the total length of the path formed, as an
additional value in the now (N-1)-dimensional feature
vector.
The returned value is the square of the Mahalanobis distance from
this feature vector, v, to a training set with mean mu and covariance
matrix C, calculated as
cost = (v - mu)' * C^-1 * (v - mu)
Input parameters:
control_coords: An N x 2 double array, containing the coordinates of
the control points; one control point in each row. In the same
format as returned by sample_control_points().
total_length: Scalar double. The total length of the path from which the control
points are sampled. (I.e. the distance along the path from the
first control poin to the last. E.g. as returned by
calculate_path_length().
mean_angles: A (N-1) x 1 double array. The mu in the above formula,
i.e. the mean of the feature vectors as calculated from the
training set. Thus, the first N-2 entries are the means of the
angles, and the last entry is the mean length of the training
worms.
inv_angles_covariance_matrix: A (N-1)x(N-1) double matrix. The
inverse of the covariance matrix of the feature vectors in the
training set. Thus, this is the C^-1 (nb: not just C) in the
above formula.
Output parameters:
current_shape_cost: Scalar double. The squared Mahalanobis distance
calculated. Higher values indicate that the path represented by
        the control points (and length) is less similar to the training
        set.
Note: All the angles in question here are direction angles,
constrained to lie between -pi and pi. The angle 0 corresponds to
        the case when two adjacent line segments are parallel (and thus
belong to the same line); the angles can be thought of as the
(signed) angles through which the path "turns", and are thus not the
angles between the line segments as such.'''
angles = self.get_angles(control_coords)
feat_vec = np.hstack((angles, [total_length])) - mean_angles
return np.dot(np.dot(feat_vec, inv_angles_covariance_matrix), feat_vec)
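    # A toy illustration of the cost formula implemented above (all numbers
    # hypothetical): with N = 4 control points there are N-2 = 2 angles, so
    # the feature vector has 3 entries (2 angles plus the total length).
    #
    #   v     = np.array([0.1, -0.2, 120.0])  # angles + length
    #   mu    = np.array([0.0,  0.0, 100.0])  # mean_angles
    #   C_inv = np.eye(3)                     # inv_angles_covariance_matrix
    #   cost  = np.dot(np.dot(v - mu, C_inv), v - mu)
    #   # -> 0.01 + 0.04 + 400.0 = 400.05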
def get_angles(self, control_coords):
'''Extract the angles at each interior control point
control_coords - an Nx2 array of coordinates of control points
returns an N-2 vector of angles between -pi and pi
'''
segments_delta = control_coords[1:] - control_coords[:-1]
segment_bearings = np.arctan2(segments_delta[:,0], segments_delta[:,1])
angles = segment_bearings[1:] - segment_bearings[:-1]
#
# Constrain the angles to -pi <= angle <= pi
#
angles[angles > np.pi] -= 2 * np.pi
angles[angles < -np.pi] += 2 * np.pi
return angles
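    # For instance (toy coordinates), a path that runs along one axis and
    # then turns 90 degrees yields a single turning angle of magnitude pi/2:
    #
    #   pts = np.array([[0., 0.], [1., 0.], [1., 1.]])
    #   self.get_angles(pts)   # -> array([-1.5708]); the sign encodes the
    #                          #    turn direction in i/j coordinates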
def cluster_graph_building(self, workspace, labels, i, skeleton, params):
binary_im = labels == i
skeleton = skeleton & binary_im
return self.get_graph_from_binary(
binary_im, skeleton, params.max_radius,
params.max_skel_length)
class Path(object):
def __init__(self, segments, branch_areas):
self.segments = segments
self.branch_areas = branch_areas
def __repr__(self):
return "{ segments="+repr(self.segments)+" branch_areas="+repr(self.branch_areas)+" }"
def get_all_paths(self, graph_struct, min_length, max_length):
        '''Given a structure describing a graph, yields all paths through the
        graph.
        The format of graph_struct is exactly that returned by
        get_graph_from_binary().
        Below, "vertex" refers to the "branch areas" of the
        graph_struct, and "edge" refers to the "segments".
For the purposes of this function, a path of length n is a sequence of n
distinct edges
e_1, ..., e_n
together with a sequence of n-1 distinct vertices
v_1, ..., v_{n-1}
such that e_1 is incident to v_1, v_1 incident to e_2, and so on.
Note that, since the ends are not considered parts of the paths, cyclic
paths are allowed (i.e. ones starting and ending at the same vertex, but
not self-crossing ones.)
        Furthermore, this function also considers two paths identical if one can
        be obtained by a simple reversal of the other.
        This function works by a simple depth-first search. It seems
        unnecessarily complicated compared to what it perhaps could have been;
        this is due to the fact that the endpoints of segments are not
        considered as vertices in the graph model used, and so each edge can be
        incident to fewer than 2 vertices.
To explain how the function works, let me define an "unfinished path" to
be a sequence of n edges e_1,...,e_n and n distinct vertices v_1, ..., v_n,
where incidence relations e_1 - v_1 - e_2 - ... - e_n - v_n apply, and
        the intention is for the path to be continued through v_n. In contrast,
call paths as defined in the previous paragraphs (where the last vertex
is not included) "finished".
The function first generates all unfinished paths of length 1 by looping
through all possible edges, and for each edge at most 2 "continuation"
vertices. It then calls get_all_paths_recur(), which, given an unfinished
path, recursively generates a list of all possible finished paths
beginning that unfinished path.
        To ensure that paths are only returned in one of the two possible
        directions, only 1-length paths and paths where the index of the
        first edge is less than that of the last edge are returned.
        To facilitate the processing in get_all_paths_recur, the function
        build_incidence_lists is used to calculate incidence tables in list
        form.
        The output is a sequence of objects, "o", of the form
        o.segments - segment indices of the path
        o.branch_areas - branch area indices of the path'''
graph_struct.incident_branch_areas, graph_struct.incident_segments = \
self.build_incidence_lists(graph_struct)
n = len(graph_struct.segments)
graph_struct.segment_lengths = np.array([
self.calculate_path_length(x[0]) for x in graph_struct.segments])
for j in range(n):
current_length = graph_struct.segment_lengths[j]
# Add all finished paths of length 1
if current_length >= min_length:
yield self.Path([j], [])
#
# Start the segment list for each branch area connected with
# a segment with the segment.
#
segment_list = [j]
branch_areas_list = [
[k] for k in graph_struct.incident_branch_areas[j]]
paths_list = self.get_all_paths_recur(graph_struct,
segment_list, branch_areas_list,
current_length, min_length, max_length)
for path in paths_list:
yield path
def build_incidence_lists(self, graph_struct):
'''Return a list of all branch areas incident to j for each segment
        incident_branch_areas[j] is a row array containing a list of all those
        branch areas incident to segment j; similarly, incident_segments[i] is a
row array containing a list of all those segments incident to branch area
i.'''
m = graph_struct.incidence_matrix.shape[1]
n = graph_struct.incidence_matrix.shape[0]
incident_segments = [
np.arange(m)[graph_struct.incidence_matrix[i,:]]
for i in range(n)]
incident_branch_areas = [
np.arange(n)[graph_struct.incidence_matrix[:,i]]
for i in range(m)]
return incident_branch_areas, incident_segments
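    # A small sketch of the incidence bookkeeping above on a hypothetical
    # 3 x 2 incidence matrix (3 branch areas as rows, 2 segments as columns):
    #
    #   M = np.array([[True,  False],
    #                 [True,  True ],
    #                 [False, True ]])
    #   incident_segments     -> [array([0]), array([0, 1]), array([1])]
    #   incident_branch_areas -> [array([0, 1]), array([1, 2])]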
def get_all_paths_recur(self, graph,
unfinished_segment, unfinished_branch_areas,
current_length, min_length, max_length):
'''Recursively find paths
incident_branch_areas - list of all branch areas incident on a segment
incident_segments - list of all segments incident on a branch
'''
if len(unfinished_segment) == 0:
return
last_segment = unfinished_segment[-1]
for unfinished_branch in unfinished_branch_areas:
end_branch_area = unfinished_branch[-1]
#
# Find all segments from the end branch
#
direction = graph.incidence_directions[end_branch_area, last_segment]
last_coord = graph.segments[last_segment][direction][-1]
for j in graph.incident_segments[end_branch_area]:
if j in unfinished_segment:
continue # segment already in the path
direction = not graph.incidence_directions[end_branch_area, j]
first_coord = graph.segments[j][direction][0]
gap_length = np.sqrt(np.sum((last_coord - first_coord) **2))
next_length = current_length + gap_length + graph.segment_lengths[j]
if next_length > max_length:
continue
next_segment = unfinished_segment + [j]
if j > unfinished_segment[0] and next_length >= min_length:
# Only include if end segment index is greater
# than start
yield self.Path(next_segment, unfinished_branch)
#
                # Can't loop back to "end_branch_area". Construct all other
                # possible branches.
#
next_branch_areas = [ unfinished_branch + [k]
for k in graph.incident_branch_areas[j]
if (k != end_branch_area) and
(k not in unfinished_branch)]
for path in self.get_all_paths_recur(
graph, next_segment, next_branch_areas,
next_length, min_length, max_length):
yield path
def cluster_paths_selection(self, graph, paths, labels, i, params):
"""Select the best paths for worms from the graph
Given a graph representing a worm cluster, and a list of paths in the
graph, selects a subcollection of paths likely to represent the worms in
the cluster.
        More specifically, finds (approximately, depending on parameters) a
        subset K of the set of paths P, minimising
        Sum, over p in K, of shape_cost(p)
        + a * Sum, over p, q distinct in K, of overlap(p, q)
        + b * leftover(K)
Here, shape_cost is a function which calculates how unlikely it is that
the path represents a true worm.
overlap(p, q) indicates how much overlap there is between paths p and q
(we want to assign a cost to overlaps, to avoid picking out essentially
the same worm, but with small variations, twice in K)
leftover(K) is a measure of the amount of the cluster "unaccounted for"
after all of the paths of P have been chosen. We assign a cost to this to
make sure we pick out all the worms in the cluster.
        Shape model: 'angle_shape_model'. More information
        can be found in calculate_angle_shape_cost().
        Selection method: 'dfs_prune', which searches
through all the combinations of paths (view this as picking out subsets
of P one element at a time, to make this a search tree) depth-first,
but by keeping track of the best solution so far (and noting that the
shape cost and overlap cost terms can only increase as paths are added
to K), it can prune away large branches of the search tree guaranteed
to be suboptimal.
Furthermore, by setting the approx_max_search_n parameter to a finite
value, this method adopts a "partially greedy" approach, at each step
searching through only a set number of branches. Setting this parameter
approx_max_search_n to 1 should in some sense give just the greedy
algorithm, with the difference that this takes the leftover cost term
into account in determining how many worms to find.
Input parameters:
graph_struct: A structure describing the graph. As returned from e.g.
get_graph_from_binary().
        path_structs_list: A list of structures, each describing one path
        through the graph. As returned by cluster_paths_finding().
params: The parameters structure. The parameters below should be
in params.cluster_paths_selection
min_path_length: Before performing the search, paths which are too
short or too long are filtered away. This is the minimum length, in
pixels.
max_path_length: Before performing the search, paths which are too
short or too long are filtered away. This is the maximum length, in
pixels.
shape_cost_method: 'angle_shape_cost'
        num_control_points: All shape cost models sample equally spaced
        control points along the paths whose shape costs are to be
        calculated. This is the number of such control points to sample.
mean_angles: [Only for 'angle_shape_cost']
inv_angles_covariance_matrix: [Only for 'angle_shape_cost']
For these two parameters, see calculate_angle_shape_cost().
overlap_leftover_method:
'skeleton_length'. The overlap/leftover calculation method to use.
Note that if selection_method is 'dfs_prune', then this must be
'skeleton_length'.
selection_method: 'dfs_prune'. The search method
to be used.
        median_worm_area: Scalar double. The approximate area of a typical
        worm. Dividing the cluster area by this value approximates the
        number of worms in the cluster, which is only used to estimate the
        best branching factors in the search tree. If approx_max_search_n is
        infinite, then this is in fact not used at all.
        overlap_weight: Scalar double. The weight factor assigned to
        overlaps, i.e. the a in the formula of the cost to be minimised.
        The unit is (shape cost unit)/(pixels of skeleton length).
        leftover_weight: Scalar double. The weight factor assigned to
        leftover pieces, i.e. the b in the formula of the cost to be
        minimised. In units of (shape cost unit)/(pixels of skeleton length).
approx_max_search_n: [Only used if selection_method is 'dfs_prune']
Outputs:
        paths_coords_selected: A list of worms selected. Each worm is
        represented as an N x 2 array of coordinates, specifying the skeleton
        of the worm as a polyline path.
"""
min_path_length = params.min_path_length
max_path_length = params.max_path_length
median_worm_area = params.median_worm_area
num_control_points = params.num_control_points
mean_angles = params.mean_angles
inv_angles_covariance_matrix = params.inv_angles_covariance_matrix
component = labels == i
max_num_worms = int(np.ceil(np.sum(component) / median_worm_area))
# First, filter out based on path length
# Simultaneously build a vector of shape costs and a vector of
# reconstructed binaries for each of the (accepted) paths.
#
# List of tuples of path structs that pass filter + cost of shape
#
paths_and_costs = []
for i, path in enumerate(paths):
current_path_coords = self.path_to_pixel_coords(graph, path)
cumul_lengths = self.calculate_cumulative_lengths(current_path_coords)
total_length = cumul_lengths[-1]
if total_length > max_path_length or total_length < min_path_length:
continue
control_coords = self.sample_control_points(
current_path_coords, cumul_lengths, num_control_points)
#
# Calculate the shape cost
#
current_shape_cost = self.calculate_angle_shape_cost(
control_coords, total_length, mean_angles,
inv_angles_covariance_matrix)
if current_shape_cost < params.cost_threshold:
paths_and_costs.append((path, current_shape_cost))
if len(paths_and_costs) == 0:
return []
path_segment_matrix = np.zeros(
(len(graph.segments), len(paths_and_costs)), bool)
for i, (path, cost) in enumerate(paths_and_costs):
path_segment_matrix[path.segments, i] = True
overlap_weight = self.overlap_weight(params)
leftover_weight = self.leftover_weight(params)
#
# Sort by increasing cost
#
costs = np.array([cost for path, cost in paths_and_costs])
order = np.lexsort([costs])
if len(order) > MAX_PATHS:
order = order[:MAX_PATHS]
costs = costs[order]
path_segment_matrix = path_segment_matrix[:, order]
current_best_subset, current_best_cost = self.fast_selection(
costs, path_segment_matrix, graph.segment_lengths,
overlap_weight, leftover_weight, max_num_worms)
selected_paths = [paths_and_costs[order[i]][0]
for i in current_best_subset]
path_coords_selected = [ self.path_to_pixel_coords(graph, path)
for path in selected_paths]
return path_coords_selected
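    # The objective minimised by the selection step can be sketched for a toy
    # pair of candidate paths (all weights and costs hypothetical):
    #
    #   total_cost(K) = sum of shape_cost(p) for p in K
    #                   + overlap_weight  * multiply-covered skeleton pixels
    #                   + leftover_weight * uncovered skeleton pixels
    #
    #   e.g. K = {p, q}, shape costs 5 and 7, 10 px covered twice, 3 px left
    #   uncovered, overlap_weight = 2, leftover_weight = 10:
    #   total_cost = 5 + 7 + 2 * 10 + 10 * 3 = 62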
def fast_selection(self, costs, path_segment_matrix, segment_lengths,
overlap_weight, leftover_weight, max_num_worms):
'''Select the best subset of paths using a breadth-first search
costs - the shape costs of every path
path_segment_matrix - an N x M matrix where N are the segments
and M are the paths. A cell is true if a path includes the segment
segment_lengths - the length of each segment
overlap_weight - the penalty per pixel of an overlap
leftover_weight - the penalty per pixel of an unincluded segment
max_num_worms - maximum # of worms allowed in returned match.
'''
current_best_subset = []
current_best_cost = np.sum(segment_lengths) * leftover_weight
current_costs = costs
current_path_segment_matrix = path_segment_matrix.astype(int)
current_path_choices = np.eye(len(costs), dtype = bool)
for i in range(min(max_num_worms, len(costs))):
current_best_subset, current_best_cost, \
current_path_segment_matrix, current_path_choices = \
self.select_one_level(
costs, path_segment_matrix, segment_lengths,
current_best_subset, current_best_cost,
current_path_segment_matrix, current_path_choices,
overlap_weight, leftover_weight)
if np.prod(current_path_choices.shape) == 0:
break
return current_best_subset, current_best_cost
def select_one_level(self, costs, path_segment_matrix, segment_lengths,
current_best_subset, current_best_cost,
current_path_segment_matrix, current_path_choices,
overlap_weight, leftover_weight):
'''Select from among sets of N paths
Select the best subset from among all possible sets of N paths,
then create the list of all sets of N+1 paths
costs - shape costs of each path
path_segment_matrix - a N x M boolean matrix where N are the segments
and M are the paths and True means that a path has a given segment
segment_lengths - the lengths of the segments (for scoring)
current_best_subset - a list of the paths in the best collection so far
current_best_cost - the total cost of that subset
current_path_segment_matrix - a matrix giving the number of times
a segment appears in each of the paths to be considered
current_path_choices - an N x M matrix where N is the number of paths
and M is the number of sets: the value at a cell is True if a path
is included in that set.
returns the current best subset, the current best cost and
the current_path_segment_matrix and current_path_choices for the
next round.
'''
#
# Compute the cost, not considering uncovered segments
#
partial_costs = (
#
# The sum of the individual costs of the chosen paths
#
np.sum(costs[:, np.newaxis] * current_path_choices, 0) +
#
# The sum of the multiply-covered segment lengths * penalty
#
np.sum(np.maximum(current_path_segment_matrix - 1, 0) *
segment_lengths[:, np.newaxis], 0) * overlap_weight)
total_costs = (partial_costs +
#
# The sum of the uncovered segments * the penalty
#
np.sum((current_path_segment_matrix[:,:] == 0) *
segment_lengths[:, np.newaxis], 0) * leftover_weight)
order = np.lexsort([total_costs])
if total_costs[order[0]] < current_best_cost:
current_best_subset = np.argwhere(current_path_choices[:,order[0]]).flatten().tolist()
current_best_cost = total_costs[order[0]]
#
# Weed out any that can't possibly be better
#
mask = partial_costs < current_best_cost
if not np.any(mask):
return current_best_subset, current_best_cost, \
np.zeros((len(costs),0),int), np.zeros((len(costs),0), bool)
order = order[mask[order]]
if len(order) * len(costs) > MAX_CONSIDERED:
# Limit # to consider at next level
order = order[:(1+MAX_CONSIDERED / len(costs))]
current_path_segment_matrix = current_path_segment_matrix[:, order]
current_path_choices = current_path_choices[:, order]
#
# Create a matrix of disallowance - you can only add a path
# that's higher than any existing path
#
i,j = np.mgrid[0:len(costs), 0:len(costs)]
disallow = i >= j
allowed = np.dot(disallow, current_path_choices) == 0
if np.any(allowed):
i,j = np.argwhere(allowed).transpose()
current_path_choices = (np.eye(len(costs), dtype = bool)[:, i] |
current_path_choices[:,j])
current_path_segment_matrix = \
path_segment_matrix[:,i] + current_path_segment_matrix[:,j]
return current_best_subset, current_best_cost, \
current_path_segment_matrix, current_path_choices
else:
return current_best_subset, current_best_cost, \
np.zeros((len(costs), 0), int), np.zeros((len(costs), 0), bool)
def search_recur(self, path_segment_matrix, segment_lengths,
path_raw_costs, overlap_weight, leftover_weight,
current_subset, last_chosen, current_cost,
current_segment_coverings, current_best_subset,
current_best_cost, branching_factors, current_level):
'''Perform a recursive depth-first search on sets of paths
Perform a depth-first search recursively, keeping the best (so far)
found subset of paths in current_best_subset, current_cost.
path_segment_matrix, segment_lengths, path_raw_costs, overlap_weight,
leftover_weight, branching_factor are essentially static.
current_subset is the currently considered subset, as an array of
indices, each index corresponding to a path in path_segment_matrix.
To avoid picking out the same subset twice, we insist that in all
subsets, indices are listed in increasing order.
Note that the shape cost term and the overlap cost term need not be
re-calculated each time, but can be calculated incrementally, as more
paths are added to the subset in consideration. Thus, current_cost holds
the sum of the shape cost and overlap cost terms for current_subset.
current_segments_coverings, meanwhile, is a logical array of length equal
to the number of segments in the graph, keeping track of the segments
covered by paths in current_subset.'''
# The cost of current_subset, including the leftover cost term
this_cost = current_cost + leftover_weight * np.sum(
segment_lengths[~ current_segment_coverings])
if this_cost < current_best_cost:
current_best_cost = this_cost
current_best_subset = current_subset
if current_level < len(branching_factors):
this_branch_factor = branching_factors[current_level]
else:
this_branch_factor = branching_factors[-1]
# Calculate, for each path after last_chosen, how much cost would be added
# to current_cost upon adding that path to the current_subset.
current_overlapped_costs = (
path_raw_costs[last_chosen:] +
np.sum(current_segment_coverings[:, np.newaxis] *
segment_lengths[:, np.newaxis] *
path_segment_matrix[:, last_chosen:], 0) * overlap_weight)
order = np.lexsort([current_overlapped_costs])
#
# limit to number of branches allowed at this level
#
order = order[np.arange(len(order))+1 < this_branch_factor]
for index in order:
new_cost = current_cost + current_overlapped_costs[index]
if new_cost >= current_best_cost:
                break  # No chance of a subsequent better cost
path_index = last_chosen + index
current_best_subset, current_best_cost = self.search_recur(
path_segment_matrix, segment_lengths, path_raw_costs,
overlap_weight, leftover_weight,
current_subset + [path_index],
path_index,
new_cost,
current_segment_coverings | path_segment_matrix[:, path_index],
current_best_subset,
current_best_cost,
branching_factors,
current_level + 1)
return current_best_subset, current_best_cost
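    # The pruning above relies on the shape and overlap terms only ever
    # growing as paths are added, so a branch whose running cost already
    # meets the best known cost can be skipped. A rough sketch with
    # hypothetical numbers:
    #
    #   current_cost = 40, current_best_cost = 55
    #   sorted candidate increments = [10, 20, 30]
    #   40 + 10 = 50 < 55  -> recurse into this branch
    #   40 + 20 = 60 >= 55 -> break; 30 need not be examined at all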
def worm_descriptor_building(self, all_path_coords, params, shape):
'''Return the coordinates of reconstructed worms in i,j,v form
Given a list of paths found in an image, reconstructs labeled
worms.
Inputs:
        all_path_coords: A list of worm paths, each entry an N x 2 array
        containing the coordinates of the worm path.
        params: the params structure loaded using read_params()
        shape: the shape of the labels image, used to clip the reconstructed
        worm pixels.
Outputs:
* an Nx3 array where the first two indices are the i,j
coordinate and the third is the worm's label.
* the lengths of each worm
* the angles for control points other than the ends
* the coordinates of the control points
'''
num_control_points = params.num_control_points
if len(all_path_coords) == 0:
return (np.zeros((0,3), int), np.zeros(0),
np.zeros((0, num_control_points-2)),
np.zeros((0, num_control_points)),
np.zeros((0, num_control_points)))
worm_radii = params.radii_from_training
all_i = []
all_j = []
all_lengths = []
all_angles = []
all_control_coords_x = []
all_control_coords_y = []
for path in all_path_coords:
cumul_lengths = self.calculate_cumulative_lengths(path)
control_coords = self.sample_control_points(
path, cumul_lengths, num_control_points)
ii,jj = self.rebuild_worm_from_control_points_approx(
control_coords, worm_radii, shape)
all_i.append(ii)
all_j.append(jj)
all_lengths.append(cumul_lengths[-1])
all_angles.append(self.get_angles(control_coords))
all_control_coords_x.append(control_coords[:,1])
all_control_coords_y.append(control_coords[:,0])
ijv = np.column_stack((
np.hstack(all_i),
np.hstack(all_j),
np.hstack([np.ones(len(ii), int) * (i+1)
for i, ii in enumerate(all_i)])))
all_lengths = np.array(all_lengths)
all_angles = np.vstack(all_angles)
all_control_coords_x = np.vstack(all_control_coords_x)
all_control_coords_y = np.vstack(all_control_coords_y)
return ijv, all_lengths, all_angles, all_control_coords_x, all_control_coords_y
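    # The i,j,v array returned above is a sparse labeling: each row holds
    # (row coordinate, column coordinate, worm label). A toy result for two
    # single-pixel worms might look like
    #
    #   np.array([[10, 12, 1],
    #             [34, 56, 2]])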
def rebuild_worm_from_control_points_approx(self, control_coords,
worm_radii, shape):
'''Rebuild a worm from its control coordinates
Given a worm specified by some control points along its spline,
reconstructs an approximate binary image representing the worm.
Specifically, this function generates an image where successive control
points have been joined by line segments, and then dilates that by a
certain (specified) radius.
Inputs:
        control_coords: An N x 2 double array, where each row contains the
        coordinates of a control point.
        worm_radii: A vector of radii, one per control point; the radius at
        each skeleton pixel is interpolated from these and used to dilate
        the reconstructed worm spline into the final worm.
Outputs:
        The i and j coordinates of all pixels in the worm, as two vectors.'''
index, count, i, j = morph.get_line_pts(control_coords[:-1,0],
control_coords[:-1,1],
control_coords[1:,0],
control_coords[1:,1])
#
# Get rid of the last point for the middle elements - these are
# duplicated by the first point in the next line
#
i = np.delete(i,index[1:])
j = np.delete(j,index[1:])
index = index - np.arange(len(index))
count -= 1
#
# Get rid of all segments that are 1 long. Those will be joined
# by the segments around them.
#
index, count = index[count !=0], count[count != 0]
#
# Find the control point and within-control-point index of each point
#
label = np.zeros(len(i), int)
label[index[1:]] = 1
label = np.cumsum(label)
order = np.arange(len(i)) - index[label]
frac = order.astype(float) / count[label].astype(float)
radius = (worm_radii[label] * (1-frac) +
worm_radii[label+1] * frac)
iworm_radius = int(np.max(np.ceil(radius)))
#
# Get dilation coordinates
#
ii, jj = np.mgrid[-iworm_radius:iworm_radius+1,
-iworm_radius:iworm_radius+1]
dd = np.sqrt((ii*ii + jj*jj).astype(float))
mask = ii*ii + jj*jj <= iworm_radius * iworm_radius
ii = ii[mask]
jj = jj[mask]
dd = dd[mask]
#
# All points (with repeats)
#
i = (i[:,np.newaxis] + ii[np.newaxis, :]).flatten()
j = (j[:,np.newaxis] + jj[np.newaxis, :]).flatten()
#
# We further mask out any dilation coordinates outside of
# the radius at our point in question
#
m = (radius[:,np.newaxis] >= dd[np.newaxis, :]).flatten()
i = i[m]
j = j[m]
#
# Find repeats by sorting and comparing against next
#
order = np.lexsort((i,j))
i = i[order]
j = j[order]
mask = np.hstack([[True], (i[:-1] != i[1:]) | (j[:-1] != j[1:])])
i = i[mask]
j = j[mask]
mask = (i >= 0) & (j >= 0) & (i < shape[0]) & (j < shape[1])
return i[mask], j[mask]
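    # The dilation radius used above is interpolated linearly between the
    # radii of the two control points bracketing each skeleton pixel. For a
    # pixel 25% of the way from control point k to k+1 with radii 4 and 8
    # (toy values):
    #
    #   radius = 4 * (1 - 0.25) + 8 * 0.25   # -> 5.0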
def read_params(self):
'''Read the parameters file'''
if not hasattr(self, "training_params"):
self.training_params = {}
return read_params(self.training_set_directory,
self.training_set_file_name,
self.training_params)
def validate_module(self, pipeline):
if self.mode == MODE_UNTANGLE:
if self.training_set_directory.dir_choice != cpprefs.URL_FOLDER_NAME:
path = os.path.join(
self.training_set_directory.get_absolute_path(),
self.training_set_file_name.value)
if not os.path.exists(path):
raise cps.ValidationError(
"Can't find file %s" %
self.training_set_file_name.value,
self.training_set_file_name)
def validate_module_warnings(self, pipeline):
'''Warn user re: Test mode '''
if pipeline.test_mode and self.mode == MODE_TRAIN:
raise cps.ValidationError("UntangleWorms will not produce training set output in Test Mode",
self.training_set_file_name)
def get_measurement_columns(self, pipeline):
'''Return a column of information for each measurement feature'''
result = []
if self.mode == MODE_UNTANGLE:
object_names = []
if self.overlap in (OO_WITH_OVERLAP, OO_BOTH):
object_names.append(self.overlap_objects.value)
if self.overlap in (OO_WITHOUT_OVERLAP, OO_BOTH):
object_names.append(self.nonoverlapping_objects.value)
for object_name in object_names:
result += I.get_object_measurement_columns(object_name)
all_features = ([F_LENGTH] + self.angle_features() +
self.control_point_features(True)+
self.control_point_features(False))
result += [
(object_name, "_".join((C_WORM, f)), cpmeas.COLTYPE_FLOAT)
for f in all_features]
return result
def angle_features(self):
'''Return a list of angle feature names'''
try:
return ["_".join((F_ANGLE, str(n)))
for n in range(1, self.ncontrol_points()-1)]
except:
logger.error("Failed to get # of control points from training file. Unknown number of angle measurements", exc_info=True)
return []
def control_point_features(self, get_x):
'''Return a list of control point feature names
get_x - return the X coordinate control point features if true, else y
'''
try:
return ["_".join((F_CONTROL_POINT_X if get_x else F_CONTROL_POINT_Y, str(n)))
for n in range(1, self.ncontrol_points()+1)]
except:
logger.error("Failed to get # of control points from training file. Unknown number of control point features", exc_info=True)
return []
def get_categories(self, pipeline, object_name):
if object_name == cpmeas.IMAGE:
return [I.C_COUNT]
if ((object_name == self.overlap_objects.value and
self.overlap in (OO_BOTH, OO_WITH_OVERLAP)) or
(object_name == self.nonoverlapping_objects.value and
self.overlap in (OO_BOTH, OO_WITHOUT_OVERLAP))):
return [I.C_LOCATION, I.C_NUMBER, C_WORM]
return []
def get_measurements(self, pipeline, object_name, category):
wants_overlapping = self.overlap in (OO_BOTH, OO_WITH_OVERLAP)
wants_nonoverlapping = self.overlap in (OO_BOTH, OO_WITHOUT_OVERLAP)
result = []
if object_name == cpmeas.IMAGE and category == I.C_COUNT:
if wants_overlapping:
result += [self.overlap_objects.value]
if wants_nonoverlapping:
result += [self.nonoverlapping_objects.value]
if ((wants_overlapping and object_name == self.overlap_objects) or
(wants_nonoverlapping and object_name == self.nonoverlapping_objects)):
if category == I.C_LOCATION:
result += [I.FTR_CENTER_X, I.FTR_CENTER_Y]
elif category == I.C_NUMBER:
result += [I.FTR_OBJECT_NUMBER]
elif category == C_WORM:
result += [F_LENGTH, F_ANGLE, F_CONTROL_POINT_X, F_CONTROL_POINT_Y]
return result
def get_measurement_scales(self, pipeline, object_name, category,
measurement, image_name):
wants_overlapping = self.overlap in (OO_BOTH, OO_WITH_OVERLAP)
wants_nonoverlapping = self.overlap in (OO_BOTH, OO_WITHOUT_OVERLAP)
scales = []
if (((wants_overlapping and object_name == self.overlap_objects) or
(wants_nonoverlapping and object_name == self.nonoverlapping_objects)) and
(category == C_WORM)):
if measurement == F_ANGLE:
scales += [str(n) for n in range(1, self.ncontrol_points()-1)]
elif measurement in [F_CONTROL_POINT_X, F_CONTROL_POINT_Y]:
scales += [str(n) for n in range(1, self.ncontrol_points()+1)]
return scales
def prepare_to_create_batch(self, workspace, fn_alter_path):
'''Prepare to create a batch file
This function is called when CellProfiler is about to create a
file for batch processing. It will pickle the image set list's
"legacy_fields" dictionary. This callback lets a module prepare for
saving.
pipeline - the pipeline to be saved
image_set_list - the image set list to be saved
fn_alter_path - this is a function that takes a pathname on the local
host and returns a pathname on the remote host. It
handles issues such as replacing backslashes and
mapping mountpoints. It should be called for every
pathname stored in the settings or legacy fields.
'''
self.training_set_directory.alter_for_create_batch_files(fn_alter_path)
return True
def upgrade_settings(self, setting_values, variable_revision_number,
module_name, from_matlab):
if variable_revision_number == 1:
# Added complexity
setting_values = setting_values + [C_ALL, "400"]
variable_revision_number = 2
return setting_values, variable_revision_number, from_matlab
def read_params(training_set_directory, training_set_file_name, d):
'''Read a training set parameters file
training_set_directory - the training set directory setting
training_set_file_name - the training set file name setting
d - a dictionary that stores cached parameters
'''
#
# The parameters file is a .xml file with the following structure:
#
# initial_filter
# min_worm_area: float
# single_worm_determination
# max_area: float
# single_worm_find_path
# method: string (=? "dfs_longest_path")
# single_worm_filter
# method: string (=? "angle_shape_cost")
# cost_threshold: float
# num_control_points: int
# mean_angles: float vector (num_control_points -1 entries)
# inv_angles_covariance_matrix: float matrix (num_control_points -1)**2
# cluster_graph_building
# method: "large_branch_area_max_skel_length"
# max_radius: float
# max_skel_length: float
# cluster_paths_finding
# method: string "dfs"
# cluster_paths_selection
# shape_cost_method: "angle_shape_model"
# selection_method: "dfs_prune"
# overlap_leftover_method: "skeleton_length"
# min_path_length: float
# max_path_length: float
    # median_worm_area: float
# worm_radius: float
# overlap_weight: int
# leftover_weight: int
# ---- the following are the same as for the single worm filter ---
# num_control_points: int
# mean_angles: float vector (num_control_points-1)
# inv_angles_covariance_matrix: (num_control_points-1)**2
# ----
# approx_max_search_n: int
# worm_descriptor_building
# method: string = "default"
# radii_from_training: vector ?of length num_control_points?
#
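    # A hypothetical fragment of such a file, illustrating the nesting only
    # (element names here are illustrative, not taken from a real training
    # set):
    #
    #   <training-data>
    #     <initial_filter>
    #       <min_worm_area>15.0</min_worm_area>
    #     </initial_filter>
    #     <single_worm_filter>
    #       <num_control_points>21</num_control_points>
    #       ...
    #     </single_worm_filter>
    #     ...
    #   </training-data>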
class X(object):
'''This "class" is used as a vehicle for arbitrary dot notation
For instance:
> x = X()
> x.foo = 1
> x.foo
1
'''
pass
path = training_set_directory.get_absolute_path()
file_name = training_set_file_name.value
if d.has_key(file_name):
result, timestamp = d[file_name]
if (timestamp == "URL" or
timestamp == os.stat(os.path.join(path, file_name)).st_mtime):
return d[file_name][0]
if training_set_directory.dir_choice == cps.URL_FOLDER_NAME:
url = file_name
fd_or_file = urllib2.urlopen(url)
is_url = True
timestamp = "URL"
else:
fd_or_file = os.path.join(path, file_name)
is_url = False
timestamp = os.stat(fd_or_file).st_mtime
try:
from xml.dom.minidom import parse
doc = parse(fd_or_file)
result = X()
def f(tag, attribute, klass):
elements = doc.documentElement.getElementsByTagName(tag)
assert len(elements) == 1
element = elements[0]
text = "".join([text.data for text in element.childNodes
if text.nodeType == doc.TEXT_NODE])
setattr(result, attribute, klass(text.strip()))
for tag, attribute, klass in (
(T_VERSION, "version", int),
(T_MIN_AREA, "min_worm_area", float),
(T_MAX_AREA, "max_area", float),
(T_COST_THRESHOLD, "cost_threshold", float),
(T_NUM_CONTROL_POINTS, "num_control_points", int),
(T_MAX_RADIUS, "max_radius", float),
(T_MAX_SKEL_LENGTH, "max_skel_length", float),
(T_MIN_PATH_LENGTH, "min_path_length", float),
(T_MAX_PATH_LENGTH, "max_path_length", float),
(T_MEDIAN_WORM_AREA, "median_worm_area", float),
(T_OVERLAP_WEIGHT, "overlap_weight", float),
(T_LEFTOVER_WEIGHT, "leftover_weight", float)):
f(tag, attribute, klass)
elements = doc.documentElement.getElementsByTagName(T_MEAN_ANGLES)
assert len(elements) == 1
element = elements[0]
result.mean_angles = np.zeros(result.num_control_points-1)
for index, value_element in enumerate(
element.getElementsByTagName(T_VALUE)):
text = "".join([text.data for text in value_element.childNodes
if text.nodeType == doc.TEXT_NODE])
result.mean_angles[index] = float(text.strip())
elements = doc.documentElement.getElementsByTagName(T_RADII_FROM_TRAINING)
assert len(elements) == 1
element = elements[0]
result.radii_from_training = np.zeros(result.num_control_points)
for index, value_element in enumerate(
element.getElementsByTagName(T_VALUE)):
text = "".join([text.data for text in value_element.childNodes
if text.nodeType == doc.TEXT_NODE])
result.radii_from_training[index] = float(text.strip())
result.inv_angles_covariance_matrix = np.zeros(
[result.num_control_points-1] * 2)
elements = doc.documentElement.getElementsByTagName(T_INV_ANGLES_COVARIANCE_MATRIX)
assert len(elements) == 1
element = elements[0]
for i, values_element in enumerate(
element.getElementsByTagName(T_VALUES)):
for j, value_element in enumerate(
values_element.getElementsByTagName(T_VALUE)):
text = "".join([text.data for text in value_element.childNodes
if text.nodeType == doc.TEXT_NODE])
result.inv_angles_covariance_matrix[i,j] = float(text.strip())
except:
if is_url:
fd_or_file = urllib2.urlopen(url)
mat_params = loadmat(fd_or_file)["params"][0,0]
field_names = mat_params.dtype.fields.keys()
result = X()
CLUSTER_PATHS_SELECTION = 'cluster_paths_selection'
CLUSTER_GRAPH_BUILDING = 'cluster_graph_building'
SINGLE_WORM_FILTER = 'single_worm_filter'
INITIAL_FILTER = 'initial_filter'
SINGLE_WORM_DETERMINATION = 'single_worm_determination'
CLUSTER_PATHS_FINDING = 'cluster_paths_finding'
WORM_DESCRIPTOR_BUILDING = 'worm_descriptor_building'
SINGLE_WORM_FIND_PATH = 'single_worm_find_path'
METHOD = "method"
STRING = "string"
SCALAR = "scalar"
VECTOR = "vector"
MATRIX = "matrix"
def mp(*args, **kwargs):
'''Look up a field from mat_params'''
x = mat_params
for arg in args[:-1]:
x = x[arg][0,0]
x = x[args[-1]]
kind = kwargs.get("kind", SCALAR)
if kind == SCALAR:
return x[0,0]
elif kind == STRING:
return x[0]
elif kind == VECTOR:
# Work-around for OS/X Numpy bug
# Copy a possibly mis-aligned buffer
b = np.array([v for v in np.frombuffer(x.data, np.uint8)], np.uint8)
return np.frombuffer(b, x.dtype)
return x
result.min_worm_area = mp(INITIAL_FILTER, "min_worm_area")
result.max_area = mp(SINGLE_WORM_DETERMINATION, "max_area")
result.cost_threshold = mp(SINGLE_WORM_FILTER, "cost_threshold")
result.num_control_points = mp(SINGLE_WORM_FILTER, "num_control_points")
result.mean_angles = mp(SINGLE_WORM_FILTER, "mean_angles", kind = VECTOR)
result.inv_angles_covariance_matrix = mp(
SINGLE_WORM_FILTER, "inv_angles_covariance_matrix", kind = MATRIX)
result.max_radius = mp(CLUSTER_GRAPH_BUILDING,
"max_radius")
result.max_skel_length = mp(CLUSTER_GRAPH_BUILDING,
"max_skel_length")
result.min_path_length = mp(
CLUSTER_PATHS_SELECTION, "min_path_length")
result.max_path_length = mp(
CLUSTER_PATHS_SELECTION, "max_path_length")
result.median_worm_area = mp(
CLUSTER_PATHS_SELECTION, "median_worm_area")
result.worm_radius = mp(
CLUSTER_PATHS_SELECTION, "worm_radius")
result.overlap_weight = mp(
CLUSTER_PATHS_SELECTION, "overlap_weight")
result.leftover_weight = mp(
CLUSTER_PATHS_SELECTION, "leftover_weight")
result.radii_from_training = mp(
WORM_DESCRIPTOR_BUILDING, "radii_from_training", kind = VECTOR)
d[file_name] = (result, timestamp)
return result
def recalculate_single_worm_control_points(all_labels, ncontrolpoints):
'''Recalculate the control points for labeled single worms
Given a labeling of single worms, recalculate the control points
for those worms.
all_labels - a sequence of label matrices
ncontrolpoints - the # of desired control points
returns a two tuple:
an N x M x 2 array where the first index is the object number,
the second index is the control point number and the third index is 0
for the Y or I coordinate of the control point and 1 for the X or J
coordinate.
a vector of N lengths.
'''
all_object_numbers = [
filter((lambda n: n > 0), np.unique(l)) for l in all_labels]
if all([len(object_numbers) == 0 for object_numbers in all_object_numbers]):
return np.zeros((0, ncontrolpoints, 2), int), np.zeros(0, int)
module = UntangleWorms()
module.create_settings()
module.num_control_points.value = ncontrolpoints
#
# Put the module in training mode - assumes that the training file is
# not present.
#
module.mode.value = MODE_TRAIN
nobjects = np.max(np.hstack(all_object_numbers))
result = np.ones((nobjects, ncontrolpoints, 2)) * np.nan
lengths = np.zeros(nobjects)
for object_numbers, labels in zip(all_object_numbers, all_labels):
for object_number in object_numbers:
mask = (labels == object_number)
skeleton = morph.skeletonize(mask)
graph = module.get_graph_from_binary(mask, skeleton)
path_coords, path = module.get_longest_path_coords(
graph, np.iinfo(int).max)
if len(path_coords) == 0:
# return NaN for the control points
continue
cumul_lengths = module.calculate_cumulative_lengths(path_coords)
if cumul_lengths[-1] == 0:
continue
control_points = module.sample_control_points(
path_coords, cumul_lengths, ncontrolpoints)
result[(object_number-1), :, :] = control_points
lengths[object_number-1] = cumul_lengths[-1]
return result, lengths
| gpl-2.0 |
griffinqiu/airflow | airflow/www/views.py | 1 | 72587 | import sys
import os
import socket
from functools import wraps
from datetime import datetime, timedelta
import dateutil.parser
import copy
from itertools import chain, product
from past.utils import old_div
from past.builtins import basestring
import inspect
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_
from flask import redirect, url_for, request, Markup, Response, current_app, render_template
from flask.ext.admin import BaseView, expose, AdminIndexView
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext.admin.actions import action
from flask.ext.login import current_user, flash, logout_user, login_required
from flask._compat import PY2
import jinja2
import markdown
import json
from wtforms import (
widgets,
Form, SelectField, TextAreaField, PasswordField, StringField)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import models
from airflow.settings import Session
from airflow import configuration
from airflow import login
from airflow import utils
from airflow.utils import AirflowException
from airflow.www import utils as wwwutils
from airflow import settings
from airflow.models import State
from airflow.www.forms import DateTimeForm, TreeForm
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
dagbag = models.DagBag(os.path.expanduser(configuration.get('core', 'DAGS_FOLDER')))
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
if configuration.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
url = url_for(
'airflow.graph',
dag_id=m.dag_id)
return Markup(
'<a href="{url}">{m.dag_id}</a>'.format(**locals()))
def log_link(v, c, m, p):
url = url_for(
'airflow.log',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
'<a href="{url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
url = url_for(
'airflow.task',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=m.dag_id,
root=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{m.task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_token(state):
color = State.color(state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{state}</span>'.format(**locals()))
def state_f(v, c, m, p):
return state_token(m.state)
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if datetime.now().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
try:
default_params = eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def render(obj, lexer):
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
inspect.getsource(x), lexers.PythonLexer),
}
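# As a rough usage sketch of the table above, rendering a task's
# bash_command for display would look like
#
#   html = attr_renderer['bash_command']('echo "hello"')
#
# which returns Pygments-highlighted HTML with line numbers.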
def data_profiling_required(f):
'''
Decorator for views requiring data profiling access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
current_app.config['LOGIN_DISABLED'] or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
session.expunge_all()
session.commit()
session.close()
payload = {}
payload['state'] = 'ERROR'
payload['error'] = ''
# Processing templated fields
try:
args = eval(chart.default_params)
if type(args) is not type(dict()):
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
from airflow import macros
args.update(request_dict)
args['macros'] = macros
sql = jinja2.Template(chart.sql).render(**args)
label = jinja2.Template(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
import pandas as pd
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
                chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart_type == "datatable":
chart.show_datatable = True
if chart.show_datatable:
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
except Exception as e:
raise AirflowException(str(e))
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
series = []
colorAxis = None
if chart_type == 'datatable':
payload['data'] = data
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
elif chart_type == 'para':
df.rename(columns={
df.columns[0]: 'name',
df.columns[1]: 'group',
}, inplace=True)
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
elif chart_type == 'heatmap':
color_perc_lbound = float(
request.args.get('color_perc_lbound', 0))
color_perc_rbound = float(
request.args.get('color_perc_rbound', 1))
color_scheme = request.args.get('color_scheme', 'blue_red')
if color_scheme == 'blue_red':
stops = [
[color_perc_lbound, '#00D1C1'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#FFFFCC'
],
[color_perc_rbound, '#FF5A5F']
]
elif color_scheme == 'blue_scale':
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_rbound, '#2222FF']
]
elif color_scheme == 'fire':
diff = float(color_perc_rbound - color_perc_lbound)
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_lbound + 0.33*diff, '#FFFF00'],
[color_perc_lbound + 0.66*diff, '#FF0000'],
[color_perc_rbound, '#000000']
]
else:
stops = [
[color_perc_lbound, '#FFFFFF'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#888888'
],
[color_perc_rbound, '#000000'],
]
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
data = []
for row in df.itertuples():
data.append({
'x': row[2],
'y': row[3],
'value': row[4],
})
x_format = '{point.x:%Y-%m-%d}' \
if chart.x_is_date else '{point.x}'
series.append({
'data': data,
'borderWidth': 0,
'colsize': 24 * 36e5,
'turboThreshold': sys.float_info.max,
'tooltip': {
'headerFormat': '',
'pointFormat': (
df.columns[1] + ': ' + x_format + '<br/>' +
df.columns[2] + ': {point.y}<br/>' +
df.columns[3] + ': <b>{point.value}</b>'
),
},
})
colorAxis = {
'stops': stops,
'minColor': '#FFFFFF',
'maxColor': '#000000',
'min': 50,
'max': 2200,
}
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
for col in df.columns:
series.append({
'name': col,
'data': [
(k, df[col][k])
for k in df[col].keys()
if not np.isnan(df[col][k])]
})
series = [serie for serie in sorted(
series, key=lambda s: s['data'][0][1], reverse=True)]
if chart_type == "stacked_area":
stacking = "normal"
chart_type = 'area'
elif chart_type == "percent_area":
stacking = "percent"
chart_type = 'area'
else:
stacking = None
hc = {
'chart': {
'type': chart_type
},
'plotOptions': {
'series': {
'marker': {
'enabled': False
}
},
'area': {'stacking': stacking},
},
'title': {'text': ''},
'xAxis': {
'title': {'text': xaxis_label},
'type': 'datetime' if chart.x_is_date else None,
},
'yAxis': {
'title': {'text': yaxis_label},
},
'colorAxis': colorAxis,
'tooltip': {
'useHTML': True,
'backgroundColor': None,
'borderWidth': 0,
},
'series': series,
}
if chart.y_log_scale:
hc['yAxis']['type'] = 'logarithmic'
hc['yAxis']['minorTickInterval'] = 0.1
if 'min' in hc['yAxis']:
del hc['yAxis']['min']
payload['state'] = 'SUCCESS'
payload['hc'] = hc
payload['data'] = data
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
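    # To sketch the two SQL layouts handled above (column names hypothetical):
    # with sql_layout == 'series' the query is expected to return
    # (series, x, y) rows, e.g. ('dag_a', '2015-01-01', 12.0), which are
    # pivoted so that each distinct series value becomes one Highcharts
    # series; with sql_layout == 'columns' it returns (x, metric1, metric2,
    # ...) and each metric column becomes its own series.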
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
session.expunge_all()
session.commit()
session.close()
if chart.chart_type == 'para':
return self.render('airflow/para/para.html', chart=chart)
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/highchart.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
#@login_required
def dag_stats(self):
states = [
State.SUCCESS,
State.RUNNING,
State.FAILED,
State.UPSTREAM_FAILED,
State.UP_FOR_RETRY,
State.QUEUED,
]
task_ids = []
dag_ids = []
for dag in dagbag.dags.values():
task_ids += dag.task_ids
if not dag.is_subdag:
dag_ids.append(dag.dag_id)
TI = models.TaskInstance
session = Session()
qry = (
session.query(TI.dag_id, TI.state, sqla.func.count(TI.task_id))
.filter(TI.task_id.in_(task_ids))
.filter(TI.dag_id.in_(dag_ids))
.group_by(TI.dag_id, TI.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
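    # The payload built above maps each DAG to one entry per state, roughly
    # (values hypothetical):
    #
    #   {"my_dag": [{"state": "success", "count": 12,
    #                "dag_id": "my_dag", "color": "green"}, ...]}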
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
code = "".join(open(dag.full_filepath, 'r').readlines())
title = dag.filepath
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=configuration.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
def dag_details(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
session = settings.Session()
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=utils.State)
@current_app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=socket.gethostname()), 404
@current_app.errorhandler(500)
def show_traceback(self):
from airflow import ascii as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.gethostname(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/sandbox')
@login_required
def sandbox(self):
from airflow import configuration
title = "Sandbox Suggested Configuration"
cfg_loc = configuration.AIRFLOW_CONFIG + '.sandbox'
f = open(cfg_loc, 'r')
config = f.read()
f.close()
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
code_html=code_html, title=title, subtitle=cfg_loc)
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/headers')
def headers(self):
d = {
'headers': {k: v for k, v in request.headers},
}
if hasattr(current_user, 'is_superuser'):
d['is_superuser'] = current_user.is_superuser()
d['data_profiling'] = current_user.data_profiling()
d['is_anonymous'] = current_user.is_anonymous()
d['is_authenticated'] = current_user.is_authenticated()
if hasattr(current_user, 'username'):
d['username'] = current_user.username
return wwwutils.json_response(d)
@expose('/login', methods=['GET', 'POST'])
def login(self):
return airflow.login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title,)
@expose('/log')
@login_required
@wwwutils.action_logging
def log(self):
BASE_LOG_FOLDER = os.path.expanduser(
configuration.get('core', 'BASE_LOG_FOLDER'))
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dag = dagbag.get_dag(dag_id)
log_relative = "{dag_id}/{task_id}/{execution_date}".format(
**locals())
loc = os.path.join(BASE_LOG_FOLDER, log_relative)
loc = loc.format(**locals())
log = ""
TI = models.TaskInstance
session = Session()
dttm = dateutil.parser.parse(execution_date)
ti = session.query(TI).filter(
TI.dag_id == dag_id, TI.task_id == task_id,
TI.execution_date == dttm).first()
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
if ti:
host = ti.hostname
log_loaded = False
if socket.gethostname() == host:
try:
f = open(loc)
log += "".join(f.readlines())
f.close()
log_loaded = True
                except Exception:
                    log = "*** Log file isn't where expected: {0}\n".format(loc)
else:
WORKER_LOG_SERVER_PORT = \
configuration.get('celery', 'WORKER_LOG_SERVER_PORT')
url = os.path.join(
"http://{host}:{WORKER_LOG_SERVER_PORT}/log", log_relative
).format(**locals())
log += "*** Log file isn't local.\n"
log += "*** Fetching here: {url}\n".format(**locals())
try:
import requests
log += '\n' + requests.get(url).text
log_loaded = True
                except Exception:
                    log += "*** Failed to fetch log file from worker.\n"
# try to load log backup from S3
s3_log_folder = configuration.get('core', 'S3_LOG_FOLDER')
if not log_loaded and s3_log_folder.startswith('s3:'):
import boto
s3 = boto.connect_s3()
s3_log_loc = os.path.join(
configuration.get('core', 'S3_LOG_FOLDER'), log_relative)
log += '*** Fetching log from S3: {}\n'.format(s3_log_loc)
log += ('*** Note: S3 logs are only available once '
'tasks have completed.\n')
bucket, key = s3_log_loc.lstrip('s3:/').split('/', 1)
s3_key = boto.s3.key.Key(s3.get_bucket(bucket), key)
if s3_key.exists():
log += '\n' + s3_key.get_contents_as_string().decode()
else:
log += '*** No log found on S3.\n'
session.commit()
session.close()
log = log.decode('utf-8') if PY2 else log
title = "Log"
return self.render(
'airflow/ti_code.html',
code=log, dag=dag, title=title, task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
@wwwutils.action_logging
def task(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = dag.get_task(task_id)
task = copy.copy(task)
task.resolve_template_files()
attributes = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
                # Skip bound methods (these share the type of this view's own
                # task() method) and attributes that get special rendering.
                if type(attr) != type(self.task) and \
                        attr_name not in attr_renderer:
attributes.append((attr_name, str(attr)))
title = "Task Details"
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
return self.render(
'airflow/task.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/run')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
force = request.args.get('force') == "true"
deps = request.args.get('deps') == "true"
try:
from airflow.executors import DEFAULT_EXECUTOR as executor
from airflow.executors import CeleryExecutor
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
except ImportError:
# in case CeleryExecutor cannot be imported it is not active either
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
executor.start()
executor.queue_task_instance(
ti, force=force, ignore_dependencies=deps)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
else:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to clear:"),
details=details,)
return response
@expose('/blocked')
@login_required
def blocked(self):
session = settings.Session()
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
MAX_PERIODS = 1000
# Flagging tasks as successful
session = settings.Session()
task_ids = [task_id]
end_date = ((dag.latest_execution_date or datetime.now())
if future else execution_date)
if 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
elif dag.start_date:
start_date = dag.start_date
else:
start_date = execution_date
start_date = execution_date if not past else start_date
if downstream:
task_ids += [
t.task_id
for t in task.get_flat_relatives(upstream=False)]
if upstream:
task_ids += [
t.task_id
for t in task.get_flat_relatives(upstream=True)]
TI = models.TaskInstance
if dag.schedule_interval == '@once':
dates = [start_date]
else:
dates = dag.date_range(start_date, end_date=end_date)
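        # Existing task instances in the selected window are flipped to
        # SUCCESS (tis_to_change); (task_id, execution_date) pairs with no
        # task instance yet are created directly in the SUCCESS state
        # (tis_to_create).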
tis = session.query(TI).filter(
TI.dag_id == dag_id,
TI.execution_date.in_(dates),
TI.task_id.in_(task_ids)).all()
tis_to_change = session.query(TI).filter(
TI.dag_id == dag_id,
TI.execution_date.in_(dates),
TI.task_id.in_(task_ids),
TI.state != State.SUCCESS).all()
tasks = list(product(task_ids, dates))
tis_to_create = list(
set(tasks) -
set([(ti.task_id, ti.execution_date) for ti in tis]))
tis_all_altered = list(chain(
[(ti.task_id, ti.execution_date) for ti in tis_to_change],
tis_to_create))
if len(tis_all_altered) > MAX_PERIODS:
flash("Too many tasks at once (>{0})".format(
MAX_PERIODS), 'error')
return redirect(origin)
if confirmed:
for ti in tis_to_change:
ti.state = State.SUCCESS
session.commit()
for task_id, task_execution_date in tis_to_create:
ti = TI(
task=dag.get_task(task_id),
execution_date=task_execution_date,
state=State.SUCCESS)
session.add(ti)
session.commit()
session.commit()
session.close()
flash("Marked success on {} task instances".format(
len(tis_all_altered)))
return redirect(origin)
else:
if not tis_all_altered:
flash("No task instances to mark as successful", 'error')
response = redirect(origin)
else:
tis = []
for task_id, task_execution_date in tis_all_altered:
tis.append(TI(
task=dag.get_task(task_id),
execution_date=task_execution_date,
state=State.SUCCESS))
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to mark as successful:"),
details=details,)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def tree(self):
dag_id = request.args.get('dag_id')
blur = configuration.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
                DR.dag_id == dag.dag_id,
                DR.execution_date <= base_date,
                DR.execution_date >= min_date)
.all()
)
dag_runs = {
dr.execution_date: utils.alchemy_to_dict(dr) for dr in dag_runs}
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
task_instances = {}
for ti in tis:
tid = utils.alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
return {
'name': task.task_id,
'instances': [
task_instances.get((task.task_id, d)) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
data = json.dumps(data, indent=4, default=utils.json_ser)
session.commit()
session.close()
form = TreeForm(data={'base_date': max_date, 'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = configuration.getboolean('webserver', 'demo_mode')
arrange = request.args.get('arrange', "LR")
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
DR = models.DagRun
drs = session.query(DR).filter_by(dag_id=dag_id).all()
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
class GraphForm(Form):
execution_date = SelectField("DAG run", choices=dr_choices)
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(
data={'execution_date': dttm.isoformat(), 'arrange': arrange})
task_instances = {
ti.task_id: utils.alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
        doc_md = markdown.markdown(dag.doc_md) \
            if hasattr(dag, 'doc_md') and dag.doc_md else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=state_token(dr_state),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2),)
@expose('/duration')
@login_required
@wwwutils.action_logging
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
days = int(request.args.get('days', 30))
dag = dagbag.get_dag(dag_id)
from_date = (datetime.today()-timedelta(days)).date()
from_date = datetime.combine(from_date, datetime.min.time())
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, from_date):
if ti.duration:
data.append([
ti.execution_date.isoformat(),
float(ti.duration) / (60*60)
])
if data:
all_data.append({'data': data, 'name': task.task_id})
session.commit()
session.close()
return self.render(
'airflow/chart.html',
dag=dag,
data=json.dumps(all_data),
chart_options={'yAxis': {'title': {'text': 'hours'}}},
height="700px",
demo_mode=configuration.getboolean('webserver', 'demo_mode'),
root=root,
)
@expose('/landing_times')
@login_required
@wwwutils.action_logging
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
days = int(request.args.get('days', 30))
dag = dagbag.get_dag(dag_id)
from_date = (datetime.today()-timedelta(days)).date()
from_date = datetime.combine(from_date, datetime.min.time())
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, from_date):
if ti.end_date:
ts = ti.execution_date
if dag.schedule_interval:
ts = dag.following_schedule(ts)
secs = old_div((ti.end_date - ts).total_seconds(), 60*60)
data.append([ti.execution_date.isoformat(), secs])
all_data.append({'data': data, 'name': task.task_id})
session.commit()
session.close()
return self.render(
'airflow/chart.html',
dag=dag,
data=json.dumps(all_data),
height="700px",
chart_options={'yAxis': {'title': {'text': 'hours after 00:00'}}},
demo_mode=configuration.getboolean('webserver', 'demo_mode'),
root=root,
)
@expose('/paused')
@login_required
@wwwutils.action_logging
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.now()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect('/')
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
@wwwutils.action_logging
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = configuration.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti
for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
data = []
for i, ti in enumerate(tis):
end_date = ti.end_date or datetime.now()
tasks += [ti.task_id]
color = State.color(ti.state)
data.append({
'x': i,
'low': int(ti.start_date.strftime('%s')) * 1000,
'high': int(end_date.strftime('%s')) * 1000,
'color': color,
})
height = (len(tis) * 25) + 50
session.commit()
session.close()
hc = {
'chart': {
'type': 'columnrange',
'inverted': True,
'height': height,
},
'xAxis': {'categories': tasks},
'yAxis': {'type': 'datetime'},
'title': {
'text': None
},
'plotOptions': {
'series': {
'cursor': 'pointer',
'minPointLength': 4,
},
},
'legend': {
'enabled': False
},
'series': [{
'data': data
}]
}
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
hc=json.dumps(hc, indent=4),
height=height,
demo_mode=demo_mode,
root=root,
)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
        except Exception:
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
qry = None
# filter the dags if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
if do_filter:
qry = (
session.query(DM)
.filter(
~DM.is_subdag, DM.is_active,
DM.owners == current_user.username)
.all()
)
else:
qry = session.query(DM).filter(~DM.is_subdag, DM.is_active).all()
orm_dags = {dag.dag_id: dag for dag in qry}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
session.expunge_all()
session.commit()
session.close()
dags = dagbag.dags.values()
if do_filter:
dags = {
dag.dag_id: dag
for dag in dags
if (
dag.owner == current_user.username and (not dag.parent_dag)
)
}
else:
dags = {dag.dag_id: dag for dag in dags if not dag.parent_dag}
all_dag_ids = sorted(set(orm_dags.keys()) | set(dags.keys()))
return self.render(
'airflow/dags.html',
dags=dags,
orm_dags=orm_dags,
all_dag_ids=all_dag_ids)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/')
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.args.get('conn_id')
csv = request.args.get('csv') == "true"
sql = request.args.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
Modifying the base ModelView class for non edit, browse only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',)
column_list = (
'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
        'x_is_date': (
            "Whether the X axis should be cast as a date field. Expect most "
            "intelligible date formats to get cast properly."
        ),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('para', 'Parallel Coordinates'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('heatmap', 'Heatmap'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if not model.user_id and current_user and hasattr(current_user, 'id'):
model.user_id = current_user.id
model.last_modified = datetime.now()
class KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description')
column_list = (
'label', 'event_type', 'start_date', 'end_date', 'reported_by')
column_default_sort = ("start_date", True)
class KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
'''
# For debugging / troubleshooting
mv = KnowEventTypeView(
models.KnownEventType,
Session, name="Known Event Types", category="Manage")
admin.add_view(mv)
class DagPickleView(SuperUserMixin, ModelView):
pass
mv = DagPickleView(
models.DagPickle,
Session, name="Pickles", category="Manage")
admin.add_view(mv)
'''
class VariableView(wwwutils.LoginMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
column_list = ('key',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
form_widget_args = {
'val': {
'rows': 20,
}
}
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
@utils.provide_session
def set_dagrun_state(ids, target_state, session=None):
try:
DR = models.DagRun
count = 0
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
count += 1
dr.state = target_state
session.commit()
flash(
"{count} dag runs were set to '{target_state}'".format(**locals()))
    except Exception:
        # This is a module-level helper, so there is no Flask-Admin view
        # instance whose handle_view_exception() could be called here; just
        # report the failure to the user.
        flash('Failed to set state', 'error')
class DagRunModelView(ModelViewOnly):
verbose_name_plural = "DAG Runs"
can_delete = True
can_edit = True
can_create = True
column_editable_list = ('state',)
verbose_name = "dag run"
column_default_sort = ('execution_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
column_filters = column_list
column_searchable_list = ('dag_id', 'state', 'run_id')
column_formatters = dict(
execution_date=datetime_f,
state=state_f,
start_date=datetime_f,
dag_id=dag_link)
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
set_dagrun_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
set_dagrun_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
set_dagrun_state(ids, State.SUCCESS)
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
named_filter_urls = True
column_formatters = dict(
log=log_link, task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('start_date', True)
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'pool', 'log')
can_delete = True
page_size = 500
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted',)
form_overrides = dict(_password=PasswordField)
form_widget_args = {
'is_encrypted': {'disabled': True},
}
    # Used to customize the form: these form elements get rendered and their
    # results are stored in the extra field as JSON. All of them need to be
    # prefixed with extra__ and then the conn_type, as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file.
form_extra_fields = {
'extra__jdbc__drv_path' : StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
}
form_choices = {
'conn_type': [
('ftp', 'FTP',),
('hdfs', 'HDFS',),
('http', 'HTTP',),
('hive_cli', 'Hive Client Wrapper',),
('hive_metastore', 'Hive Metastore Thrift',),
('hiveserver2', 'Hive Server 2 Thrift',),
('jdbc', 'Jdbc Connection',),
('mysql', 'MySQL',),
('postgres', 'Postgres',),
('oracle', 'Oracle',),
('vertica', 'Vertica',),
('presto', 'Presto',),
('s3', 'S3',),
('samba', 'Samba',),
('sqlite', 'Sqlite',),
('mssql', 'Microsoft SQL Server'),
('mesos_framework-id', 'Mesos Framework ID'),
]
}
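    # For JDBC connections, the extra__jdbc__* form values are folded back
    # into the single "extra" column as a JSON blob (see on_model_change).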
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc']:
extra = {
key:formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def alert_fernet_key(cls):
return not configuration.has_option('core', 'fernet_key')
@classmethod
    def is_secure(cls):
        """
        Used to decide whether to warn in the Connection list view that
        passwords can't be encrypted (cryptography missing or no fernet_key
        configured).
        """
is_secure = False
try:
import cryptography
configuration.get('core', 'fernet_key')
is_secure = True
        except Exception:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception as e:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
from airflow import configuration
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = configuration.AIRFLOW_CONFIG
if configuration.getboolean("webserver", "expose_config"):
with open(configuration.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
else:
config = (
"# You Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = 50
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
| apache-2.0 |
bricegnichols/urbansim | urbansim/models/tests/test_transition.py | 3 | 16010 | import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import transition
from ...utils import testing as ust
@pytest.fixture
def basic_df():
return pd.DataFrame(
{'x': range(5),
'y': range(5, 10)},
index=range(100, 105))
@pytest.fixture
def year():
return 2112
@pytest.fixture
def totals_col():
return 'total'
@pytest.fixture
def rates_col():
return 'growth_rate'
@pytest.fixture
def grow_targets(year, totals_col):
return pd.DataFrame({totals_col: [7]}, index=[year])
@pytest.fixture
def grow_targets_filters(year, totals_col):
return pd.DataFrame({'x_min': [0, 2, np.nan],
'y_max': [7, 9, np.nan],
'x': [np.nan, np.nan, 4],
totals_col: [1, 4, 10]},
index=[year, year, year])
@pytest.fixture(scope='function')
def random_df(request):
"""
Seed the numpy prng and return a data frame w/ predictable test inputs
so that the tests will have consistent results across builds.
"""
old_state = np.random.get_state()
def fin():
# tear down: reset the prng after the test to the pre-test state
np.random.set_state(old_state)
request.addfinalizer(fin)
np.random.seed(1)
return pd.DataFrame(
{'some_count': np.random.randint(1, 8, 20)},
index=range(0, 20))
@pytest.fixture
def growth_rates(rates_col, totals_col, grow_targets):
del grow_targets[totals_col]
grow_targets[rates_col] = [0.4]
return grow_targets
@pytest.fixture
def growth_rates_filters(rates_col, totals_col, grow_targets_filters):
del grow_targets_filters[totals_col]
grow_targets_filters[rates_col] = [0.5, -0.5, 0]
return grow_targets_filters
def assert_empty_index(index):
pdt.assert_index_equal(index, pd.Index([]))
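# basic_df has five rows indexed 100-104, so adding two rows should yield
# seven rows with new labels 105 and 106, while removing two should leave
# three; the helpers below encode those expectations.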
def assert_for_add(new, added):
assert len(new) == 7
pdt.assert_index_equal(added, pd.Index([105, 106]))
def assert_for_remove(new, added):
assert len(new) == 3
assert_empty_index(added)
def test_add_rows(basic_df):
nrows = 2
new, added, copied = transition.add_rows(basic_df, nrows)
assert_for_add(new, added)
assert len(copied) == nrows
assert copied.isin(basic_df.index).all()
def test_add_rows_starting_index(basic_df):
nrows = 2
starting_index = 1000
new, added, copied = transition.add_rows(basic_df, nrows, starting_index)
assert len(new) == len(basic_df) + nrows
pdt.assert_index_equal(added, pd.Index([1000, 1001]))
assert len(copied) == nrows
assert copied.isin(basic_df.index).all()
def test_add_rows_zero(basic_df):
nrows = 0
new, added, copied = transition.add_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(added)
assert_empty_index(copied)
def test_add_rows_with_accounting(random_df):
control = 10
new, added, copied = transition.add_rows(
random_df, control, accounting_column='some_count')
assert control == new.loc[copied]['some_count'].sum()
assert copied.isin(random_df.index).all()
def test_remove_rows(basic_df):
nrows = 2
new, removed_indexes = transition.remove_rows(basic_df, nrows)
assert_for_remove(new, transition._empty_index())
assert len(removed_indexes) == nrows
assert removed_indexes.isin(basic_df.index).all()
def test_remove_rows_zero(basic_df):
nrows = 0
new, removed = transition.remove_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(removed)
def test_remove_rows_all(basic_df):
nrows = len(basic_df)
new, removed = transition.remove_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df.loc[[]])
ust.assert_index_equal(removed, basic_df.index)
def test_remove_rows_with_accounting(random_df):
control = 10
new, removed = transition.remove_rows(
random_df, control, accounting_column='some_count')
assert control == random_df.loc[removed]['some_count'].sum()
assert removed.isin(random_df.index).all()
def test_remove_rows_raises(basic_df):
# should raise ValueError if asked to remove more rows than
# are in the table
nrows = 25
with pytest.raises(ValueError):
transition.remove_rows(basic_df, nrows)
def test_add_or_remove_rows_add(basic_df):
nrows = 2
new, added, copied, removed = \
transition.add_or_remove_rows(basic_df, nrows)
assert_for_add(new, added)
assert len(copied) == abs(nrows)
assert copied.isin(basic_df.index).all()
assert_empty_index(removed)
def test_add_or_remove_rows_remove(basic_df):
nrows = -2
new, added, copied, removed = \
transition.add_or_remove_rows(basic_df, nrows)
assert_for_remove(new, added)
assert len(removed) == abs(nrows)
assert removed.isin(basic_df.index).all()
assert_empty_index(copied)
def test_add_or_remove_rows_zero(basic_df):
nrows = 0
new, added, copied, removed = \
transition.add_or_remove_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(added)
assert_empty_index(copied)
assert_empty_index(removed)
def test_grtransition_add(basic_df):
growth_rate = 0.4
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
assert_for_add(new, added)
assert len(copied) == 2
assert copied.isin(basic_df.index).all()
assert_empty_index(removed)
def test_grtransition_add_with_accounting(random_df):
growth_rate = .1
year = 2012
orig_total = random_df['some_count'].sum()
growth = int(round(orig_total * growth_rate))
target = orig_total + growth
grt = transition.GrowthRateTransition(growth_rate, 'some_count')
new, added, copied, removed = grt(random_df, year)
assert growth == new.loc[copied]['some_count'].sum()
assert target == new['some_count'].sum()
assert copied.isin(random_df.index).all()
assert_empty_index(removed)
def test_grtransition_remove(basic_df):
growth_rate = -0.4
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
assert_for_remove(new, added)
assert_empty_index(copied)
assert len(removed) == 2
assert removed.isin(basic_df.index).all()
def test_grtransition_remove_with_accounting(random_df):
growth_rate = -.1
year = 2012
orig_total = random_df['some_count'].sum()
change = -1 * int(round(orig_total * growth_rate))
target = orig_total - change
grt = transition.GrowthRateTransition(growth_rate, 'some_count')
new, added, copied, removed = grt(random_df, year)
assert change == random_df.loc[removed]['some_count'].sum()
assert target == new['some_count'].sum()
assert removed.isin(random_df.index).all()
assert_empty_index(added)
assert_empty_index(copied)
def test_grtransition_remove_all(basic_df):
growth_rate = -1
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df.loc[[]])
assert_empty_index(added)
assert_empty_index(copied)
ust.assert_index_equal(removed, basic_df.index)
def test_grtransition_zero(basic_df):
growth_rate = 0
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(added)
assert_empty_index(copied)
assert_empty_index(removed)
def test_tgrtransition_add(basic_df, growth_rates, year, rates_col):
tgrt = transition.TabularGrowthRateTransition(growth_rates, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
assert len(new) == 7
bdf_imax = basic_df.index.values.max()
assert pd.Series([bdf_imax + 1, bdf_imax + 2]).isin(new.index).all()
assert len(copied) == 2
assert_empty_index(removed)
def test_tgrtransition_remove(basic_df, growth_rates, year, rates_col):
growth_rates[rates_col] = -0.4
tgrt = transition.TabularGrowthRateTransition(growth_rates, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
assert len(new) == 3
assert_empty_index(added)
assert_empty_index(copied)
assert len(removed) == 2
def test_tgrtransition_with_accounting(random_df):
"""
    Test segmented growth rate transitions -- with an accounting
    column -- in a single test covering mixed growth rate trends:
declining, growing and no growth.
"""
grp1 = random_df.copy()
grp1['segment'] = 'a'
grp2 = random_df.copy()
grp2['segment'] = 'b'
grp3 = random_df.copy()
grp3['segment'] = 'c'
test_df = pd.concat([grp1, grp2, grp3], axis=0, ignore_index=True)
orig_total = random_df['some_count'].sum()
year = 2012
growth_rates = pd.DataFrame(
{
'grow_rate': [-0.1, 0.25, 0],
'segment': ['a', 'b', 'c']
},
index=[year, year, year])
tgrt = transition.TabularGrowthRateTransition(
growth_rates, 'grow_rate', 'some_count')
new, added, copied, removed = tgrt.transition(test_df, year)
added_rows = new.loc[copied]
removed_rows = test_df.loc[removed]
# test a declining segment
a_added_rows = added_rows[added_rows['segment'] == 'a']
a_removed_rows = removed_rows[removed_rows['segment'] == 'a']
a_change = int(round(orig_total * -0.1))
a_target = orig_total + a_change
assert a_change * -1 == a_removed_rows['some_count'].sum()
assert a_target == new[new['segment'] == 'a']['some_count'].sum()
assert_empty_index(a_added_rows.index)
# test a growing segment
b_added_rows = added_rows[added_rows['segment'] == 'b']
b_removed_rows = removed_rows[removed_rows['segment'] == 'b']
b_change = int(round(orig_total * 0.25))
b_target = orig_total + b_change
assert b_change == b_added_rows['some_count'].sum()
assert b_target == new[new['segment'] == 'b']['some_count'].sum()
assert_empty_index(b_removed_rows.index)
# test a no change segment
c_added_rows = added_rows[added_rows['segment'] == 'c']
c_removed_rows = removed_rows[removed_rows['segment'] == 'c']
assert orig_total == new[new['segment'] == 'c']['some_count'].sum()
assert_empty_index(c_added_rows.index)
assert_empty_index(c_removed_rows.index)
def test_tgrtransition_remove_all(basic_df, growth_rates, year, rates_col):
growth_rates[rates_col] = -1
tgrt = transition.TabularGrowthRateTransition(growth_rates, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df.loc[[]])
assert_empty_index(added)
assert_empty_index(copied)
ust.assert_index_equal(removed, basic_df.index)
def test_tgrtransition_zero(basic_df, growth_rates, year, rates_col):
growth_rates[rates_col] = 0
tgrt = transition.TabularGrowthRateTransition(growth_rates, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(added)
assert_empty_index(copied)
assert_empty_index(removed)
def test_tgrtransition_filters(
basic_df, growth_rates_filters, year, rates_col):
tgrt = transition.TabularGrowthRateTransition(
growth_rates_filters, rates_col)
new, added, copied, removed = tgrt.transition(basic_df, year)
assert len(new) == 5
assert basic_df.index.values.max() + 1 in new.index
assert len(copied) == 1
assert len(removed) == 1
def test_tabular_transition_add(basic_df, grow_targets, totals_col, year):
tran = transition.TabularTotalsTransition(grow_targets, totals_col)
new, added, copied, removed = tran.transition(basic_df, year)
assert_for_add(new, added)
bdf_imax = basic_df.index.values.max()
assert pd.Series([bdf_imax + 1, bdf_imax + 2]).isin(new.index).all()
assert len(copied) == 2
assert_empty_index(removed)
def test_tabular_transition_remove(basic_df, grow_targets, totals_col, year):
grow_targets[totals_col] = [3]
tran = transition.TabularTotalsTransition(grow_targets, totals_col)
new, added, copied, removed = tran.transition(basic_df, year)
assert_for_remove(new, added)
assert_empty_index(copied)
assert len(removed) == 2
def test_tabular_transition_remove_all(
basic_df, grow_targets, totals_col, year):
grow_targets[totals_col] = [0]
tran = transition.TabularTotalsTransition(grow_targets, totals_col)
new, added, copied, removed = tran.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df.loc[[]])
assert_empty_index(added)
assert_empty_index(copied)
ust.assert_index_equal(removed, basic_df.index)
def test_tabular_transition_raises_on_bad_year(
basic_df, grow_targets, totals_col, year):
tran = transition.TabularTotalsTransition(grow_targets, totals_col)
with pytest.raises(ValueError):
tran.transition(basic_df, year + 100)
def test_tabular_transition_add_filters(
basic_df, grow_targets_filters, totals_col, year):
tran = transition.TabularTotalsTransition(grow_targets_filters, totals_col)
new, added, copied, removed = tran.transition(basic_df, year)
assert len(new) == grow_targets_filters[totals_col].sum()
assert basic_df.index.values.max() + 1 in new.index
assert len(copied) == 11
assert len(removed) == 1
def test_update_linked_table(basic_df):
col_name = 'x'
added = pd.Index([5, 6, 7])
copied = pd.Index([1, 3, 1])
removed = pd.Index([0])
updated = transition._update_linked_table(
basic_df, col_name, added, copied, removed)
assert len(updated) == len(basic_df) + len(added) - len(removed)
npt.assert_array_equal(updated[col_name].values, [1, 2, 3, 4, 5, 7, 6])
pdt.assert_series_equal(
updated['y'],
pd.Series([6, 7, 8, 9, 6, 6, 8], index=updated.index, name='y'))
def test_updated_linked_table_remove_only(basic_df):
col_name = 'x'
added = pd.Index([])
copied = pd.Index([])
removed = pd.Index([1, 3])
updated = transition._update_linked_table(
basic_df, col_name, added, copied, removed)
assert len(updated) == len(basic_df) + len(added) - len(removed)
def test_transition_model(basic_df, grow_targets_filters, totals_col, year):
grow_targets_filters[totals_col] = [3, 1, 1]
tran = transition.TabularTotalsTransition(grow_targets_filters, totals_col)
model = transition.TransitionModel(tran)
linked_table = pd.DataFrame(
{'z': ['a', 'b', 'c', 'd', 'e'],
'thing_id': basic_df.index})
new, added, new_linked = model.transition(
basic_df, year, linked_tables={'linked': (linked_table, 'thing_id')})
assert len(new) == grow_targets_filters[totals_col].sum()
assert new.index.values.max() == basic_df.index.values.max() + 1
assert len(new_linked['linked']) == grow_targets_filters[totals_col].sum()
assert new.index.values.max() in new_linked['linked'].thing_id.values
assert new_linked['linked'].index.values.max() == 5
assert added.isin(new.index).all()
assert not added.isin(basic_df.index).any()
npt.assert_array_equal(added.values, [basic_df.index.values.max() + 1])
def test_tabular_transition_add_and_remove():
data = pd.DataFrame(
{'a': ['x', 'x', 'y', 'y', 'y', 'y', 'y', 'y', 'z', 'z']})
totals = pd.DataFrame(
{'a': ['x', 'y', 'z'],
'total': [3, 1, 10]},
index=[2112, 2112, 2112])
tran = transition.TabularTotalsTransition(totals, 'total')
model = transition.TransitionModel(tran)
new, added, _ = model.transition(data, 2112)
assert len(new) == totals.total.sum()
assert added.is_unique is True
assert new.index.is_unique is True
| bsd-3-clause |
tacaswell/bokeh | bokeh/compat/mplexporter/tools.py | 75 | 1732 | """
Tools for matplotlib plot exporting
"""
def ipynb_vega_init():
"""Initialize the IPython notebook display elements
This function borrows heavily from the excellent vincent package:
http://github.com/wrobstory/vincent
"""
try:
from IPython.core.display import display, HTML
except ImportError:
        print('IPython Notebook could not be loaded.')
        return
require_js = '''
if (window['d3'] === undefined) {{
require.config({{ paths: {{d3: "http://d3js.org/d3.v3.min"}} }});
require(["d3"], function(d3) {{
window.d3 = d3;
{0}
}});
}};
if (window['topojson'] === undefined) {{
require.config(
{{ paths: {{topojson: "http://d3js.org/topojson.v1.min"}} }}
);
require(["topojson"], function(topojson) {{
window.topojson = topojson;
}});
}};
'''
d3_geo_projection_js_url = "http://d3js.org/d3.geo.projection.v0.min.js"
d3_layout_cloud_js_url = ("http://wrobstory.github.io/d3-cloud/"
"d3.layout.cloud.js")
topojson_js_url = "http://d3js.org/topojson.v1.min.js"
vega_js_url = 'http://trifacta.github.com/vega/vega.js'
dep_libs = '''$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$([IPython.events]).trigger("vega_loaded.vincent");
})
})
})
});''' % (d3_geo_projection_js_url, d3_layout_cloud_js_url,
topojson_js_url, vega_js_url)
load_js = require_js.format(dep_libs)
html = '<script>'+load_js+'</script>'
display(HTML(html))
| bsd-3-clause |
HaliteChallenge/Halite-II | airesources/ML-StarterBot-Python/tsmlstarterbot/train.py | 2 | 5091 | import argparse
import json
import os.path
import zipfile
import numpy as np
import pandas as pd
from tsmlstarterbot.parsing import parse
from tsmlstarterbot.neural_net import NeuralNet
def fetch_data_dir(directory, limit):
"""
Loads up to limit games into Python dictionaries from uncompressed replay files.
"""
replay_files = sorted([f for f in os.listdir(directory) if
os.path.isfile(os.path.join(directory, f)) and f.startswith("replay-")])
if len(replay_files) == 0:
raise Exception("Didn't find any game replays. Please call make games.")
print("Found {} games.".format(len(replay_files)))
print("Trying to load up to {} games ...".format(limit))
loaded_games = 0
all_data = []
for r in replay_files:
full_path = os.path.join(directory, r)
with open(full_path) as game:
game_data = game.read()
game_json_data = json.loads(game_data)
all_data.append(game_json_data)
loaded_games = loaded_games + 1
if loaded_games >= limit:
break
print("{} games loaded.".format(loaded_games))
return all_data
def fetch_data_zip(zipfilename, limit):
"""
Loads up to limit games into Python dictionaries from a zipfile containing uncompressed replay files.
"""
all_jsons = []
with zipfile.ZipFile(zipfilename) as z:
print("Found {} games.".format(len(z.filelist)))
print("Trying to load up to {} games ...".format(limit))
for i in z.filelist[:limit]:
with z.open(i) as f:
lines = f.readlines()
assert len(lines) == 1
d = json.loads(lines[0].decode())
all_jsons.append(d)
print("{} games loaded.".format(len(all_jsons)))
return all_jsons
def main():
parser = argparse.ArgumentParser(description="Halite II training")
parser.add_argument("--model_name", help="Name of the model")
parser.add_argument("--minibatch_size", type=int, help="Size of the minibatch", default=100)
parser.add_argument("--steps", type=int, help="Number of steps in the training", default=100)
parser.add_argument("--data", help="Data directory or zip file containing uncompressed games")
parser.add_argument("--cache", help="Location of the model we should continue to train")
parser.add_argument("--games_limit", type=int, help="Train on up to games_limit games", default=1000)
parser.add_argument("--seed", type=int, help="Random seed to make the training deterministic")
parser.add_argument("--bot_to_imitate", help="Name of the bot whose strategy we want to learn")
parser.add_argument("--dump_features_location", help="Location of hdf file where the features should be stored")
args = parser.parse_args()
# Make deterministic if needed
if args.seed is not None:
np.random.seed(args.seed)
nn = NeuralNet(cached_model=args.cache, seed=args.seed)
if args.data.endswith('.zip'):
raw_data = fetch_data_zip(args.data, args.games_limit)
else:
raw_data = fetch_data_dir(args.data, args.games_limit)
data_input, data_output = parse(raw_data, args.bot_to_imitate, args.dump_features_location)
data_size = len(data_input)
training_input, training_output = data_input[:int(0.85 * data_size)], data_output[:int(0.85 * data_size)]
validation_input, validation_output = data_input[int(0.85 * data_size):], data_output[int(0.85 * data_size):]
training_data_size = len(training_input)
# randomly permute the data
permutation = np.random.permutation(training_data_size)
training_input, training_output = training_input[permutation], training_output[permutation]
print("Initial, cross validation loss: {}".format(nn.compute_loss(validation_input, validation_output)))
curves = []
for s in range(args.steps):
start = (s * args.minibatch_size) % training_data_size
end = start + args.minibatch_size
training_loss = nn.fit(training_input[start:end], training_output[start:end])
if s % 25 == 0 or s == args.steps - 1:
validation_loss = nn.compute_loss(validation_input, validation_output)
print("Step: {}, cross validation loss: {}, training_loss: {}".format(s, validation_loss, training_loss))
curves.append((s, training_loss, validation_loss))
cf = pd.DataFrame(curves, columns=['step', 'training_loss', 'cv_loss'])
fig = cf.plot(x='step', y=['training_loss', 'cv_loss']).get_figure()
# Save the trained model, so it can be used by the bot
current_directory = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(current_directory, os.path.pardir, "models", args.model_name + ".ckpt")
print("Training finished, serializing model to {}".format(model_path))
nn.save(model_path)
print("Model serialized")
curve_path = os.path.join(current_directory, os.path.pardir, "models", args.model_name + "_training_plot.png")
fig.savefig(curve_path)
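# Example invocation (illustrative only; the exact entry point, model name and
# data path depend on how the starter kit is laid out):
#   python -m tsmlstarterbot.train --model_name my_model --data data/games.zip \
#       --games_limit 500 --steps 1000 --seed 0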
if __name__ == "__main__":
main()
| mit |
daniel-muthukrishna/DASH | astrodash/sn_processing.py | 1 | 6203 | # Pre-processing class
import sys
from scipy.signal import medfilt
import numpy as np
from astrodash.preprocessing import ReadSpectrumFile, PreProcessSpectrum, ProcessingTools
from astrodash.array_tools import normalise_spectrum, zero_non_overlap_part
def limit_wavelength_range(wave, flux, minWave, maxWave):
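    """Zero out the flux outside the requested wavelength range.

    Bins below minWave and from maxWave onwards (to the nearest bin) are set
    to zero in place rather than trimmed, so the flux array keeps its
    original length and stays aligned with the wavelength grid.
    """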
minIdx = (np.abs(wave - minWave)).argmin()
maxIdx = (np.abs(wave - maxWave)).argmin()
flux[:minIdx] = np.zeros(minIdx)
flux[maxIdx:] = np.zeros(len(flux) - maxIdx)
return flux
class PreProcessing(object):
""" Pre-processes spectra before training """
def __init__(self, filename, w0, w1, nw):
self.filename = filename
self.w0 = w0
self.w1 = w1
self.nw = nw
self.numSplinePoints = 13
self.processingTools = ProcessingTools()
self.readSpectrumFile = ReadSpectrumFile(filename, w0, w1, nw)
self.preProcess = PreProcessSpectrum(w0, w1, nw)
self.spectrum = self.readSpectrumFile.file_extension()
if len(self.spectrum) == 3:
self.redshiftFromFile = True
else:
self.redshiftFromFile = False
def two_column_data(self, z, smooth, minWave, maxWave):
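        """Pre-process a two-column (wavelength, flux) spectrum.

        The flux is normalised, restricted to [minWave, maxWave],
        median-filtered, de-redshifted, binned onto the fixed log-wavelength
        grid, continuum-divided, mean-zeroed, apodised and re-normalised,
        with the non-overlapping edges padded at 0.5.
        """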
if self.redshiftFromFile is True:
self.wave, self.flux, z = self.spectrum
else:
self.wave, self.flux = self.spectrum
self.flux = normalise_spectrum(self.flux)
self.flux = limit_wavelength_range(self.wave, self.flux, minWave, maxWave)
self.wDensity = (self.w1 - self.w0) / self.nw # Average wavelength spacing
wavelengthDensity = (max(self.wave) - min(self.wave)) / len(self.wave)
filterSize = int(self.wDensity / wavelengthDensity * smooth / 2) * 2 + 1
preFiltered = medfilt(self.flux, kernel_size=filterSize)
wave, deredshifted = self.readSpectrumFile.two_col_input_spectrum(self.wave, preFiltered, z)
if len(wave) < 2:
sys.exit("The redshifted spectrum of file: {0} is out of the classification range between {1} to {2} "
"Angstroms. Please remove this file from classification or reduce the redshift before re-running "
"the program.".format(self.filename, self.w0, self.w1))
binnedwave, binnedflux, minIndex, maxIndex = self.preProcess.log_wavelength(wave, deredshifted)
newflux, continuum = self.preProcess.continuum_removal(binnedwave, binnedflux, self.numSplinePoints, minIndex,
maxIndex)
meanzero = self.preProcess.mean_zero(newflux, minIndex, maxIndex)
apodized = self.preProcess.apodize(meanzero, minIndex, maxIndex)
# filterSize = smooth * 2 + 1
medianFiltered = medfilt(apodized, kernel_size=1) # filterSize)
fluxNorm = normalise_spectrum(medianFiltered)
fluxNorm = zero_non_overlap_part(fluxNorm, minIndex, maxIndex, outerVal=0.5)
# # PAPER PLOTS
# import matplotlib.pyplot as plt
#
# plt.figure(num=1, figsize=(10, 6))
# plt.plot(self.wave, self.flux, label='Raw', linewidth=1.8)
# plt.plot(self.wave, preFiltered, label='Filtered', linewidth=1.8)
# plt.ylim(-8, 8)
# plt.xlabel('Wavelength ($\mathrm{\AA}$)', fontsize=19)
# plt.ylabel('Relative Flux', fontsize=19)
# plt.legend(frameon=False, fontsize=15, loc=1)
# plt.xlim(2500, 9000)
# plt.tick_params(labelsize=16)
# plt.tight_layout()
# plt.axhline(0, color='black', linewidth=0.5)
# plt.savefig('/Users/danmuth/OneDrive/Documents/DASH/Paper/Figures/Filtering.pdf')
#
# plt.figure(num=2, figsize=(10, 6))
# plt.plot(wave, deredshifted, label='De-redshifted', linewidth=1.8)
# plt.plot(binnedwave, binnedflux, label='Log-wavelength binned', linewidth=1.8)
# plt.xlabel('Wavelength ($\mathrm{\AA}$)', fontsize=19)
# plt.ylabel('Relative Flux', fontsize=19)
# plt.legend(frameon=False, fontsize=15, loc=1)
# plt.xlim(2500, 9000)
# plt.tick_params(labelsize=16)
# plt.tight_layout()
# plt.axhline(0, color='black', linewidth=0.5)
# plt.savefig('/Users/danmuth/OneDrive/Documents/DASH/Paper/Figures/Deredshifting.pdf')
#
# plt.figure(num=3, figsize=(10, 6))
# plt.plot(binnedwave, binnedflux, label='Log-wavelength binned', linewidth=1.8)
# plt.plot(binnedwave, continuum, label='Continuum', linewidth=1.8)
# plt.plot(binnedwave, newflux, label='Continuum divided', linewidth=1.8)
# plt.xlabel('Wavelength ($\mathrm{\AA}$)', fontsize=19)
# plt.ylabel('Relative Flux', fontsize=19)
# plt.legend(frameon=False, fontsize=15, loc=1)
# plt.xlim(2500, 9000)
# plt.tick_params(labelsize=16)
# plt.tight_layout()
# plt.axhline(0, color='black', linewidth=0.5)
# plt.savefig('/Users/danmuth/OneDrive/Documents/DASH/Paper/Figures/Continuum.pdf')
#
# plt.figure(num=4, figsize=(10, 6))
# plt.plot(binnedwave, meanzero, label='Continuum divided', linewidth=1.8)
# plt.plot(binnedwave, apodized, label='Apodized', linewidth=1.8)
# # fluxNorm = (apodized - min(apodized)) / (max(apodized) - min(apodized))
# plt.plot(binnedwave, fluxNorm, label='Normalised', linewidth=1.8)
# plt.xlabel('Wavelength ($\mathrm{\AA}$)', fontsize=19)
# plt.ylabel('Relative Flux', fontsize=19)
# plt.legend(frameon=False, fontsize=15, loc=1)
# plt.xlim(2500, 9000)
# plt.tick_params(labelsize=16)
# plt.tight_layout()
# plt.axhline(0, color='black', linewidth=0.5)
# plt.savefig('/Users/danmuth/OneDrive/Documents/DASH/Paper/Figures/Apodize.pdf')
#
# plt.show()
return binnedwave, fluxNorm, minIndex, maxIndex, z
if __name__ == '__main__':
fData = '/Users/danmuth/PycharmProjects/astrodash/templates/OzDES_data/ATEL_9570_Run25/DES16C2ma_C2_combined_160926_v10_b00.dat'
preData = PreProcessing(fData, 3000, 10000, 1024)
waveData, fluxData, minIData, maxIData, z = preData.two_column_data(0.24, 5, 3000, 10000)
| mit |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/io/gbq.py | 9 | 27054 | import warnings
from datetime import datetime
import json
import logging
from time import sleep
import uuid
import numpy as np
from distutils.version import StrictVersion
from pandas import compat
from pandas.core.api import DataFrame
from pandas.tools.merge import concat
from pandas.core.common import PandasError
from pandas.util.decorators import deprecate
from pandas.compat import lzip, bytes_to_str
def _check_google_client_version():
try:
import pkg_resources
except ImportError:
raise ImportError('Could not import pkg_resources (setuptools).')
if compat.PY3:
google_api_minimum_version = '1.4.1'
else:
google_api_minimum_version = '1.2.0'
_GOOGLE_API_CLIENT_VERSION = pkg_resources.get_distribution('google-api-python-client').version
if StrictVersion(_GOOGLE_API_CLIENT_VERSION) < StrictVersion(google_api_minimum_version):
raise ImportError("pandas requires google-api-python-client >= {0} for Google BigQuery support, "
"current version {1}".format(google_api_minimum_version, _GOOGLE_API_CLIENT_VERSION))
logger = logging.getLogger('pandas.io.gbq')
logger.setLevel(logging.ERROR)
class AccessDenied(PandasError, ValueError):
"""
Raised when invalid credentials are provided, or tokens have expired.
"""
pass
class DatasetCreationError(PandasError, ValueError):
"""
Raised when the create dataset method fails
"""
pass
class GenericGBQException(PandasError, ValueError):
"""
Raised when an unrecognized Google API Error occurs.
"""
pass
class InvalidColumnOrder(PandasError, ValueError):
"""
Raised when the provided column order for output
results DataFrame does not match the schema
returned by BigQuery.
"""
pass
class InvalidPageToken(PandasError, ValueError):
"""
Raised when Google BigQuery fails to return,
or returns a duplicate page token.
"""
pass
class InvalidSchema(PandasError, ValueError):
"""
Raised when the provided DataFrame does
not match the schema of the destination
table in BigQuery.
"""
pass
class NotFoundException(PandasError, ValueError):
"""
Raised when the project_id, table or dataset provided in the query could not be found.
"""
pass
class StreamingInsertError(PandasError, ValueError):
"""
Raised when BigQuery reports a streaming insert error.
For more information see `Streaming Data Into BigQuery
<https://cloud.google.com/bigquery/streaming-data-into-bigquery>`__
"""
class TableCreationError(PandasError, ValueError):
"""
Raised when the create table method fails
"""
pass
class GbqConnector(object):
def __init__(self, project_id, reauth=False):
self.test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.credentials = self.get_credentials()
self.service = self.get_service(self.credentials)
def test_google_api_imports(self):
try:
import httplib2
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client.tools import run_flow, argparser
except ImportError as e:
raise ImportError("Missing module required for Google BigQuery support: {0}".format(str(e)))
def get_credentials(self):
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client.tools import run_flow, argparser
_check_google_client_version()
flow = OAuth2WebServerFlow(client_id='495642085510-k0tmvj2m941jhre2nbqka17vqpjfddtd.apps.googleusercontent.com',
client_secret='kOc9wMptUtxkcIFbtZCcrEAc',
scope='https://www.googleapis.com/auth/bigquery',
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
storage = Storage('bigquery_credentials.dat')
credentials = storage.get()
if credentials is None or credentials.invalid or self.reauth:
credentials = run_flow(flow, storage, argparser.parse_args([]))
return credentials
@staticmethod
def get_service(credentials):
import httplib2
from apiclient.discovery import build
_check_google_client_version()
http = httplib2.Http()
http = credentials.authorize(http)
bigquery_service = build('bigquery', 'v2', http=http)
return bigquery_service
@staticmethod
def process_http_error(ex):
# See `BigQuery Troubleshooting Errors <https://cloud.google.com/bigquery/troubleshooting-errors>`__
status = json.loads(bytes_to_str(ex.content))['error']
errors = status.get('errors', None)
if errors:
for error in errors:
reason = error['reason']
message = error['message']
raise GenericGBQException("Reason: {0}, Message: {1}".format(reason, message))
raise GenericGBQException(errors)
@staticmethod
def process_insert_errors(insert_errors, verbose):
for insert_error in insert_errors:
row = insert_error['index']
errors = insert_error.get('errors', None)
for error in errors:
reason = error['reason']
message = error['message']
location = error['location']
error_message = 'Error at Row: {0}, Reason: {1}, Location: {2}, Message: {3}'.format(row, reason, location, message)
# Report all error messages if verbose is set
if verbose:
print(error_message)
else:
raise StreamingInsertError(error_message + '\nEnable verbose logging to see all errors')
raise StreamingInsertError
def run_query(self, query, verbose=True):
from apiclient.errors import HttpError
from oauth2client.client import AccessTokenRefreshError
_check_google_client_version()
job_collection = self.service.jobs()
job_data = {
'configuration': {
'query': {
'query': query
# 'allowLargeResults', 'createDisposition', 'preserveNulls', destinationTable, useQueryCache
}
}
}
try:
query_reply = job_collection.insert(projectId=self.project_id, body=job_data).execute()
except AccessTokenRefreshError:
raise AccessDenied("The credentials have been revoked or expired, please re-run the application "
"to re-authorize")
except HttpError as ex:
self.process_http_error(ex)
job_reference = query_reply['jobReference']
while not query_reply.get('jobComplete', False):
if verbose:
print('Waiting for job to complete...')
try:
query_reply = job_collection.getQueryResults(projectId=job_reference['projectId'],
jobId=job_reference['jobId']).execute()
except HttpError as ex:
self.process_http_error(ex)
total_rows = int(query_reply['totalRows'])
result_pages = list()
seen_page_tokens = list()
current_row = 0
# Only read schema on first page
schema = query_reply['schema']
# Loop through each page of data
while 'rows' in query_reply and current_row < total_rows:
page = query_reply['rows']
result_pages.append(page)
current_row += len(page)
page_token = query_reply.get('pageToken', None)
if not page_token and current_row < total_rows:
raise InvalidPageToken(
"Required pageToken was missing. Received {0} of {1} rows".format(current_row,
total_rows))
elif page_token in seen_page_tokens:
raise InvalidPageToken("A duplicate pageToken was returned")
seen_page_tokens.append(page_token)
try:
query_reply = job_collection.getQueryResults(
projectId=job_reference['projectId'],
jobId=job_reference['jobId'],
pageToken=page_token).execute()
except HttpError as ex:
self.process_http_error(ex)
if current_row < total_rows:
raise InvalidPageToken()
return schema, result_pages
def load_data(self, dataframe, dataset_id, table_id, chunksize, verbose):
from apiclient.errors import HttpError
job_id = uuid.uuid4().hex
rows = []
remaining_rows = len(dataframe)
if verbose:
total_rows = remaining_rows
print("\n\n")
for index, row in dataframe.reset_index(drop=True).iterrows():
row_dict = dict()
row_dict['json'] = json.loads(row.to_json(force_ascii=False,
date_unit='s',
date_format='iso'))
row_dict['insertId'] = job_id + str(index)
rows.append(row_dict)
remaining_rows -= 1
if (len(rows) % chunksize == 0) or (remaining_rows == 0):
if verbose:
print("\rStreaming Insert is {0}% Complete".format(((total_rows - remaining_rows) * 100) / total_rows))
body = {'rows': rows}
try:
response = self.service.tabledata().insertAll(
projectId = self.project_id,
datasetId = dataset_id,
tableId = table_id,
body = body).execute()
except HttpError as ex:
self.process_http_error(ex)
# For streaming inserts, even if you receive a success HTTP response code, you'll need to check the
# insertErrors property of the response to determine if the row insertions were successful, because
# it's possible that BigQuery was only partially successful at inserting the rows.
# See the `Success HTTP Response Codes <https://cloud.google.com/bigquery/streaming-data-into-bigquery#troubleshooting>`__
# section
insert_errors = response.get('insertErrors', None)
if insert_errors:
self.process_insert_errors(insert_errors, verbose)
sleep(1) # Maintains the inserts "per second" rate per API
rows = []
if verbose:
print("\n")
def verify_schema(self, dataset_id, table_id, schema):
from apiclient.errors import HttpError
try:
return (self.service.tables().get(
projectId=self.project_id,
datasetId=dataset_id,
tableId=table_id
).execute()['schema']) == schema
except HttpError as ex:
self.process_http_error(ex)
def delete_and_recreate_table(self, dataset_id, table_id, table_schema, verbose):
delay = 0
# Changes to table schema may take up to 2 minutes as of May 2015
# See `Issue 191 <https://code.google.com/p/google-bigquery/issues/detail?id=191>`__
# Compare previous schema with new schema to determine if there should be a 120 second delay
if not self.verify_schema(dataset_id, table_id, table_schema):
if verbose:
print('The existing table has a different schema. Please wait 2 minutes. See Google BigQuery issue #191')
delay = 120
table = _Table(self.project_id, dataset_id)
table.delete(table_id)
table.create(table_id, table_schema)
sleep(delay)
def _parse_data(schema, rows):
# see: http://pandas.pydata.org/pandas-docs/dev/missing_data.html#missing-data-casting-rules-and-indexing
dtype_map = {'INTEGER': np.dtype(float),
'FLOAT': np.dtype(float),
'TIMESTAMP': 'M8[ns]'} # This seems to be buggy without nanosecond indicator
fields = schema['fields']
col_types = [field['type'] for field in fields]
col_names = [str(field['name']) for field in fields]
col_dtypes = [dtype_map.get(field['type'], object) for field in fields]
page_array = np.zeros((len(rows),),
dtype=lzip(col_names, col_dtypes))
for row_num, raw_row in enumerate(rows):
entries = raw_row.get('f', [])
for col_num, field_type in enumerate(col_types):
field_value = _parse_entry(entries[col_num].get('v', ''),
field_type)
page_array[row_num][col_num] = field_value
return DataFrame(page_array, columns=col_names)
def _parse_entry(field_value, field_type):
if field_value is None or field_value == 'null':
return None
if field_type == 'INTEGER' or field_type == 'FLOAT':
return float(field_value)
elif field_type == 'TIMESTAMP':
timestamp = datetime.utcfromtimestamp(float(field_value))
return np.datetime64(timestamp)
elif field_type == 'BOOLEAN':
return field_value == 'true'
return field_value
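# Illustrative examples of the conversions above (added, not part of the
# original module):
# >>> _parse_entry('3', 'INTEGER')
# 3.0
# >>> _parse_entry('true', 'BOOLEAN')
# True
# >>> _parse_entry(None, 'FLOAT') is None
# True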
def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=False, verbose=True):
"""Load data from Google BigQuery.
THIS IS AN EXPERIMENTAL LIBRARY
The main method a user calls to execute a Query in Google BigQuery and read results
into a pandas DataFrame using the v2 Google API client for Python. Documentation for
the API is available at https://developers.google.com/api-client-library/python/.
Authentication to the Google BigQuery service is via OAuth 2.0 using the product name
'pandas GBQ'.
Parameters
----------
query : str
SQL-Like Query to return data values
project_id : str
Google BigQuery Account project ID.
index_col : str (optional)
Name of result column to use for index in results DataFrame
col_order : list(str) (optional)
List of BigQuery column names in the desired order for results
DataFrame
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
verbose : boolean (default True)
Verbose output
Returns
-------
df: DataFrame
DataFrame representing results of query
"""
if not project_id:
raise TypeError("Missing required parameter: project_id")
connector = GbqConnector(project_id, reauth=reauth)
schema, pages = connector.run_query(query, verbose=verbose)
dataframe_list = []
while len(pages) > 0:
page = pages.pop()
dataframe_list.append(_parse_data(schema, page))
if len(dataframe_list) > 0:
final_df = concat(dataframe_list, ignore_index=True)
else:
final_df = _parse_data(schema, [])
# Reindex the DataFrame on the provided column
if index_col is not None:
if index_col in final_df.columns:
final_df.set_index(index_col, inplace=True)
else:
raise InvalidColumnOrder(
'Index column "{0}" does not exist in DataFrame.'
.format(index_col)
)
# Change the order of columns in the DataFrame based on provided list
if col_order is not None:
if sorted(col_order) == sorted(final_df.columns):
final_df = final_df[col_order]
else:
raise InvalidColumnOrder(
'Column order does not match this DataFrame.'
)
# Downcast floats to integers and objects to booleans
# if there are no NaN's. This is presently due to a
# limitation of numpy in handling missing data.
final_df._data = final_df._data.downcast(dtypes='infer')
return final_df
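# Illustrative usage sketch (added, not part of the original module); the
# project id, dataset and query below are placeholders, not real resources:
# >>> df = read_gbq("SELECT name, COUNT(*) AS n FROM [my_dataset.my_table] GROUP BY name",
# ... project_id="my-project-id", index_col="name")
# The call blocks until the BigQuery job completes, then concatenates all
# result pages into a single DataFrame.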
def to_gbq(dataframe, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail'):
"""Write a DataFrame to a Google BigQuery table.
THIS IS AN EXPERIMENTAL LIBRARY
Parameters
----------
dataframe : DataFrame
DataFrame to be written
destination_table : string
Name of table to be written, in the form 'dataset.tablename'
project_id : str
Google BigQuery Account project ID.
chunksize : int (default 10000)
Number of rows to be inserted in each chunk from the dataframe.
verbose : boolean (default True)
Show percentage complete
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
'fail': If table exists, do nothing.
'replace': If table exists, drop it, recreate it, and insert data.
'append': If table exists, insert data. Create if does not exist.
"""
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
if '.' not in destination_table:
raise NotFoundException("Invalid Table Name. Should be of the form 'datasetId.tableId' ")
connector = GbqConnector(project_id, reauth=reauth)
dataset_id, table_id = destination_table.rsplit('.', 1)
table = _Table(project_id, dataset_id, reauth=reauth)
table_schema = _generate_bq_schema(dataframe)
# If table exists, check if_exists parameter
if table.exists(table_id):
if if_exists == 'fail':
raise TableCreationError("Could not create the table because it already exists. "
"Change the if_exists parameter to append or replace data.")
elif if_exists == 'replace':
connector.delete_and_recreate_table(dataset_id, table_id, table_schema, verbose)
elif if_exists == 'append':
if not connector.verify_schema(dataset_id, table_id, table_schema):
raise InvalidSchema("Please verify that the column order, structure and data types in the DataFrame "
"match the schema of the destination table.")
else:
table.create(table_id, table_schema)
connector.load_data(dataframe, dataset_id, table_id, chunksize, verbose)
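# Illustrative usage sketch (added, not part of the original module); dataset
# and table names are placeholders:
# >>> to_gbq(df, 'my_dataset.my_table', project_id="my-project-id",
# ... chunksize=10000, if_exists='append')
# Rows are streamed in chunks through tabledata().insertAll, so when appending
# the DataFrame must match the existing table schema.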
def generate_bq_schema(df, default_type='STRING'):
# deprecation TimeSeries, #11121
warnings.warn("generate_bq_schema is deprecated and will be removed in a future version",
FutureWarning, stacklevel=2)
return _generate_bq_schema(df, default_type=default_type)
def _generate_bq_schema(df, default_type='STRING'):
""" Given a passed df, generate the associated Google BigQuery schema.
Parameters
----------
df : DataFrame
default_type : string
The default big query type in case the type of the column
does not exist in the schema.
"""
type_mapping = {
'i': 'INTEGER',
'b': 'BOOLEAN',
'f': 'FLOAT',
'O': 'STRING',
'S': 'STRING',
'U': 'STRING',
'M': 'TIMESTAMP'
}
fields = []
for column_name, dtype in df.dtypes.iteritems():
fields.append({'name': column_name,
'type': type_mapping.get(dtype.kind, default_type)})
return {'fields': fields}
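# Illustrative sketch of the kind-to-type mapping above (added, not part of the
# original module); column order follows df.dtypes:
# >>> df = DataFrame({'name': ['a', 'b'], 'value': [1.5, 2.0], 'count': [1, 2]})
# >>> _generate_bq_schema(df)
# {'fields': [{'name': 'count', 'type': 'INTEGER'},
# {'name': 'name', 'type': 'STRING'},
# {'name': 'value', 'type': 'FLOAT'}]}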
class _Table(GbqConnector):
def __init__(self, project_id, dataset_id, reauth=False):
from apiclient.errors import HttpError
self.test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.credentials = self.get_credentials()
self.service = self.get_service(self.credentials)
self.http_error = HttpError
self.dataset_id = dataset_id
def exists(self, table_id):
""" Check if a table exists in Google BigQuery
.. versionadded:: 0.17.0
Parameters
----------
table : str
Name of table to be verified
Returns
-------
boolean
true if table exists, otherwise false
"""
try:
self.service.tables().get(
projectId=self.project_id,
datasetId=self.dataset_id,
tableId=table_id).execute()
return True
except self.http_error as ex:
if ex.resp.status == 404:
return False
else:
self.process_http_error(ex)
def create(self, table_id, schema):
""" Create a table in Google BigQuery given a table and schema
.. versionadded:: 0.17.0
Parameters
----------
table : str
Name of table to be written
schema : str
Use the generate_bq_schema to generate your table schema from a dataframe.
"""
if self.exists(table_id):
raise TableCreationError("The table could not be created because it already exists")
if not _Dataset(self.project_id).exists(self.dataset_id):
_Dataset(self.project_id).create(self.dataset_id)
body = {
'schema': schema,
'tableReference': {
'tableId': table_id,
'projectId': self.project_id,
'datasetId': self.dataset_id
}
}
try:
self.service.tables().insert(
projectId=self.project_id,
datasetId=self.dataset_id,
body=body).execute()
except self.http_error as ex:
self.process_http_error(ex)
def delete(self, table_id):
""" Delete a table in Google BigQuery
.. versionadded:: 0.17.0
Parameters
----------
table : str
Name of table to be deleted
"""
if not self.exists(table_id):
raise NotFoundException("Table does not exist")
try:
self.service.tables().delete(
datasetId=self.dataset_id,
projectId=self.project_id,
tableId=table_id).execute()
except self.http_error as ex:
self.process_http_error(ex)
class _Dataset(GbqConnector):
def __init__(self, project_id, reauth=False):
from apiclient.errors import HttpError
self.test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.credentials = self.get_credentials()
self.service = self.get_service(self.credentials)
self.http_error = HttpError
def exists(self, dataset_id):
""" Check if a dataset exists in Google BigQuery
.. versionadded:: 0.17.0
Parameters
----------
dataset_id : str
Name of dataset to be verified
Returns
-------
boolean
true if dataset exists, otherwise false
"""
try:
self.service.datasets().get(
projectId=self.project_id,
datasetId=dataset_id).execute()
return True
except self.http_error as ex:
if ex.resp.status == 404:
return False
else:
self.process_http_error(ex)
def datasets(self):
""" Return a list of datasets in Google BigQuery
.. versionadded:: 0.17.0
Parameters
----------
None
Returns
-------
list
List of datasets under the specific project
"""
try:
list_dataset_response = self.service.datasets().list(
projectId=self.project_id).execute().get('datasets', None)
if not list_dataset_response:
return []
dataset_list = list()
for row_num, raw_row in enumerate(list_dataset_response):
dataset_list.append(raw_row['datasetReference']['datasetId'])
return dataset_list
except self.http_error as ex:
self.process_http_error(ex)
def create(self, dataset_id):
""" Create a dataset in Google BigQuery
.. versionadded:: 0.17.0
Parameters
----------
dataset : str
Name of dataset to be written
"""
if self.exists(dataset_id):
raise DatasetCreationError("The dataset could not be created because it already exists")
body = {
'datasetReference': {
'projectId': self.project_id,
'datasetId': dataset_id
}
}
try:
self.service.datasets().insert(
projectId=self.project_id,
body=body).execute()
except self.http_error as ex:
self.process_http_error(ex)
def delete(self, dataset_id):
""" Delete a dataset in Google BigQuery
.. versionadded:: 0.17.0
Parameters
----------
dataset : str
Name of dataset to be deleted
"""
if not self.exists(dataset_id):
raise NotFoundException("Dataset {0} does not exist".format(dataset_id))
try:
self.service.datasets().delete(
datasetId=dataset_id,
projectId=self.project_id).execute()
except self.http_error as ex:
self.process_http_error(ex)
def tables(self, dataset_id):
""" List tables in the specific dataset in Google BigQuery
.. versionadded:: 0.17.0
Parameters
----------
dataset : str
Name of dataset to list tables for
Returns
-------
list
List of tables under the specific dataset
"""
try:
list_table_response = self.service.tables().list(
projectId=self.project_id,
datasetId=dataset_id).execute().get('tables', None)
if not list_table_response:
return []
table_list = list()
for row_num, raw_row in enumerate(list_table_response):
table_list.append(raw_row['tableReference']['tableId'])
return table_list
except self.http_error as ex:
self.process_http_error(ex)
| mit |
evanbiederstedt/RRBSfun | epiphen/cll_tests/total_CLL_chrX.py | 1 | 8305 | import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)  # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(cw154))
print(len(trito))
totalfiles = cw154 + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chrX_"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ["RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC",
"RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC",
"RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC",
"RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG",
"RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC",
"RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC",
"RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG",
"RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG",
"RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG",
"RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC",
"RRBS_cw154_Tris_protease_CTCTCTAC.GACACG",
"RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC",
"RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC",
"RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC",
"RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG",
"RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG",
"RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG",
"RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC",
"RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG",
"RBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC",
"RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG",
"RRBS_trito_pool_1_TAAGGCGA.ACAACC",
"RRBS_trito_pool_1_TAAGGCGA.ACGTGG",
"RRBS_trito_pool_1_TAAGGCGA.ACTCAC",
"RRBS_trito_pool_1_TAAGGCGA.ATAGCG",
"RRBS_trito_pool_1_TAAGGCGA.ATCGAC",
"RRBS_trito_pool_1_TAAGGCGA.CAAGAG",
"RRBS_trito_pool_1_TAAGGCGA.CATGAC",
"RRBS_trito_pool_1_TAAGGCGA.CCTTCG",
"RRBS_trito_pool_1_TAAGGCGA.CGGTAG",
"RRBS_trito_pool_1_TAAGGCGA.CTATTG",
"RRBS_trito_pool_1_TAAGGCGA.GACACG",
"RRBS_trito_pool_1_TAAGGCGA.GCATTC",
"RRBS_trito_pool_1_TAAGGCGA.GCTGCC",
"RRBS_trito_pool_1_TAAGGCGA.GGCATC",
"RRBS_trito_pool_1_TAAGGCGA.GTGAGG",
"RRBS_trito_pool_1_TAAGGCGA.GTTGAG",
"RRBS_trito_pool_1_TAAGGCGA.TAGCGG",
"RRBS_trito_pool_1_TAAGGCGA.TATCTC",
"RRBS_trito_pool_1_TAAGGCGA.TCTCTG",
"RRBS_trito_pool_1_TAAGGCGA.TGACAG",
"RRBS_trito_pool_1_TAAGGCGA.TGCTGC",
"RRBS_trito_pool_2_CGTACTAG.ACAACC",
"RRBS_trito_pool_2_CGTACTAG.ACGTGG",
"RRBS_trito_pool_2_CGTACTAG.ACTCAC",
"RRBS_trito_pool_2_CGTACTAG.AGGATG",
"RRBS_trito_pool_2_CGTACTAG.ATAGCG",
"RRBS_trito_pool_2_CGTACTAG.ATCGAC",
"RRBS_trito_pool_2_CGTACTAG.CAAGAG",
"RRBS_trito_pool_2_CGTACTAG.CATGAC",
"RRBS_trito_pool_2_CGTACTAG.CCTTCG",
"RRBS_trito_pool_2_CGTACTAG.CGGTAG",
"RRBS_trito_pool_2_CGTACTAG.CTATTG",
"RRBS_trito_pool_2_CGTACTAG.GACACG",
"RRBS_trito_pool_2_CGTACTAG.GCATTC",
"RRBS_trito_pool_2_CGTACTAG.GCTGCC",
"RRBS_trito_pool_2_CGTACTAG.GGCATC",
"RRBS_trito_pool_2_CGTACTAG.GTGAGG",
"RRBS_trito_pool_2_CGTACTAG.GTTGAG",
"RRBS_trito_pool_2_CGTACTAG.TAGCGG",
"RRBS_trito_pool_2_CGTACTAG.TATCTC",
"RRBS_trito_pool_2_CGTACTAG.TCTCTG",
"RRBS_trito_pool_2_CGTACTAG.TGACAG"]
print(total_matrix.shape)
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
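# Note (added comment): each row of `tott` now reads "<sample_name> <string>",
# where the string concatenates the per-position calls with '?' for missing
# values -- a PHYLIP-style record, matching the .phy file written below.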
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/CLL_tests")
tott.to_csv("total_CLL_chromX.phy", header=None, index=None)
print(tott.shape)
| mit |
eulerreich/keras | tests/manual/check_callbacks.py | 82 | 7540 | import numpy as np
import random
import theano
from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
import keras.callbacks as cbks
from matplotlib import pyplot as plt
from matplotlib import animation
##############################
# model DrawActivations test #
##############################
print('Running DrawActivations test')
nb_classes = 10
batch_size = 128
nb_epoch = 10
max_train_samples = 512
max_test_samples = 1
np.random.seed(1337)
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1,1,28,28)[:max_train_samples]
X_train = X_train.astype("float32")
X_train /= 255
X_test = X_test.reshape(-1,1,28,28)[:max_test_samples]
X_test = X_test.astype("float32")
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
class Frames(object):
def __init__(self, n_plots=16):
self._n_frames = 0
self._framedata = []
self._titles = []
for i in range(n_plots):
self._framedata.append([])
def add_frame(self, i, frame):
self._framedata[i].append(frame)
def set_title(self, title):
self._titles.append(title)
class SubplotTimedAnimation(animation.TimedAnimation):
def __init__(self, fig, frames, grid=(4, 4), interval=10, blit=False, **kwargs):
self.n_plots = grid[0] * grid[1]
self.axes = [fig.add_subplot(grid[0], grid[1], i + 1) for i in range(self.n_plots)]
for axis in self.axes:
axis.get_xaxis().set_ticks([])
axis.get_yaxis().set_ticks([])
self.frames = frames
self.imgs = [self.axes[i].imshow(frames._framedata[i][0], interpolation='nearest', cmap='bone') for i in range(self.n_plots)]
self.title = fig.suptitle('')
super(SubplotTimedAnimation, self).__init__(fig, interval=interval, blit=blit, **kwargs)
def _draw_frame(self, j):
for i in range(self.n_plots):
self.imgs[i].set_data(self.frames._framedata[i][j])
if len(self.frames._titles) > j:
self.title.set_text(self.frames._titles[j])
self._drawn_artists = self.imgs
def new_frame_seq(self):
return iter(range(len(self.frames._framedata[0])))
def _init_draw(self):
for img in self.imgs:
img.set_data([[]])
def combine_imgs(imgs, grid=(1,1)):
n_imgs, img_h, img_w = imgs.shape
if n_imgs != grid[0] * grid[1]:
raise ValueError()
combined = np.zeros((grid[0] * img_h, grid[1] * img_w))
for i in range(grid[0]):
for j in range(grid[1]):
combined[img_h*i:img_h*(i+1),img_w*j:img_w*(j+1)] = imgs[grid[0] * i + j]
return combined
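# Illustrative shape check (added, not part of the original script): tiling
# sixteen 28x28 activation maps on a 4x4 grid gives one 112x112 image.
# >>> combine_imgs(np.random.rand(16, 28, 28), grid=(4, 4)).shape
# (112, 112)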
class DrawActivations(Callback):
def __init__(self, figsize):
self.fig = plt.figure(figsize=figsize)
def on_train_begin(self, logs={}):
self.imgs = Frames(n_plots=5)
layers_0_ids = np.random.choice(32, 16, replace=False)
self.test_layer0 = theano.function([self.model.get_input()], self.model.layers[1].get_output(train=False)[0, layers_0_ids])
layers_1_ids = np.random.choice(64, 36, replace=False)
self.test_layer1 = theano.function([self.model.get_input()], self.model.layers[5].get_output(train=False)[0, layers_1_ids])
self.test_layer2 = theano.function([self.model.get_input()], self.model.layers[10].get_output(train=False)[0])
def on_epoch_begin(self, epoch, logs={}):
self.epoch = epoch
def on_batch_end(self, batch, logs={}):
if batch % 5 == 0:
self.imgs.add_frame(0, X_test[0,0])
self.imgs.add_frame(1, combine_imgs(self.test_layer0(X_test), grid=(4, 4)))
self.imgs.add_frame(2, combine_imgs(self.test_layer1(X_test), grid=(6, 6)))
self.imgs.add_frame(3, self.test_layer2(X_test).reshape((16,16)))
self.imgs.add_frame(4, self.model._predict(X_test)[0].reshape((1,10)))
self.imgs.set_title('Epoch #%d - Batch #%d' % (self.epoch, batch))
def on_train_end(self, logs={}):
anim = SubplotTimedAnimation(self.fig, self.imgs, grid=(1,5), interval=10, blit=False, repeat_delay=1000)
# anim.save('test_gif.gif', fps=15, writer='imagemagick')
plt.show()
# model = Sequential()
# model.add(Dense(784, 50))
# model.add(Activation('relu'))
# model.add(Dense(50, 10))
# model.add(Activation('softmax'))
model = Sequential()
model.add(Convolution2D(32, 1, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64*8*8, 256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256, 10, W_regularizer = l2(0.1)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Fit the model
draw_weights = DrawActivations(figsize=(5.4, 1.35))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, callbacks=[draw_weights])
##########################
# model checkpoint tests #
##########################
print('Running ModelCheckpoint test')
nb_classes = 10
batch_size = 128
nb_epoch = 20
# small sample size to overfit on training data
max_train_samples = 50
max_test_samples = 1000
np.random.seed(1337) # for reproducibility
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000,784)[:max_train_samples]
X_test = X_test.reshape(10000,784)[:max_test_samples]
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]
# Create a slightly larger network than required to test best validation save only
model = Sequential()
model.add(Dense(784, 500))
model.add(Activation('relu'))
model.add(Dense(500, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# test file location
path = "/tmp"
filename = "model_weights.hdf5"
import os
f = os.path.join(path, filename)
print("Test model checkpointer")
# only store best validation model in checkpointer
checkpointer = cbks.ModelCheckpoint(filepath=f, verbose=1, save_best_only=True)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test), callbacks =[checkpointer])
if not os.path.isfile(f):
raise Exception("Model weights were not saved to %s" % (f))
print("Test model checkpointer without validation data")
import warnings
warnings.filterwarnings('error')
try:
# this should issue a warning
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, callbacks =[checkpointer])
except:
print("Tests passed")
import sys
sys.exit(0)
raise Exception("Modelcheckpoint tests did not pass")
| mit |
shikhardb/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that neither the training score nor the
cross-validation score is very good at the end. However, this curve shape is
found very often in more complex datasets: the training score starts very high
and decreases, while the cross-validation score starts very low and increases.
On the right side we see the learning curve of an SVM with an RBF kernel. We
can see clearly that the training score is still around the maximum and the
validation score could be increased with more training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curves.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
hariseldon99/archives | isingrand/scripts/isingrand_esys.py | 1 | 6104 | #!/usr/bin/python
"""
Created on Tues Mar 4 2014
@author: Analabha Roy ([email protected])
Usage : ./isingrand_esys.py -h
"""
import numpy as np
import numpy.random as nprand
import numpy.linalg as nplinalg
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import sys, getopt, time
"""
Default Parameters are entered here
"""
#Lattice size
L = 7
#Transverse field
alpha = 10.0
#Random number seed
seed = 7
out_fname_evals = "isingrand_evals.npy"
out_fname_evecs = "isingrand_evecs.npy"
out_fname_mags = 'mags_ebasis_tfield_%f_latsize_%d.txt'
out_fname_cc = 'initstate_ebasis_tfield_%f_latsize_%d.txt'
helpstring = """Usage:\n\n ./isingrand_esys.py
-l <lattice size>
-s <random no generator seed>
-f <transverse field>
-e </path/to/evals/outfile>
-u </path/to/evecs/outfile>
-m </path/to/mags/outfile>
-v <verbose>"""
#Pauli matrices
sx,sy,sz = np.array([[0,1],[1,0]]),np.array([[0,-1j],[1j,0]]),\
np.array([[1,0],[0,-1]])
def kemat(i):
#Left Hand Side
if(i == 1):
ke = np.kron(sx,sx)
else:
dim = 2**(i-1)
ke = np.kron(np.eye(dim,dim),sx)
ke = np.kron(ke,sx)
#Right Hand Side
if(i < L-1):
dim = 2**(L-i-1)
ke = np.kron(ke,np.eye(dim,dim))
return(ke)
def nummat(i):
#Left Hand Side
if(i==1):
num = sz
else:
dim = 2**(i-1)
num = np.kron(np.eye(dim,dim),sz)
#Right Hand Side
if(i < L):
dim = 2**(L-i)
num = np.kron(num,np.eye(dim,dim))
return(num)
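# Sketch of the operator layout (added comment): nummat(i) applies sigma_z to
# site i only, e.g. for L = 3, nummat(2) = kron(I_2, kron(sz, I_2)) is 8x8;
# kemat(i) couples neighbouring sites i and i+1 with sigma_x sigma_x. Both are
# padded with identities so every operator is 2**L x 2**L.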
def diagonalize(H):
try:
evals, U = nplinalg.eigh(H)
idx = evals.argsort()
evals = evals[idx]
U = U[:,idx]
except nplinalg.linalg.LinAlgError:
evals, U = 0.0,0.0
return(evals, U)
if __name__ == '__main__':
verbosity = 0
try:
opts, args = getopt.getopt(sys.argv[1:] , "hl:s:f:e:u:m:v")
except getopt.GetoptError:
print helpstring
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print helpstring
sys.exit(1)
elif opt == '-l':
L = int(arg)
elif opt == '-s':
seed = int(arg)
elif opt == '-f':
alpha = float(arg)
elif opt == '-e':
out_fname_evals = arg
elif opt == '-u':
out_fname_evecs = arg
elif opt == '-m':
out_fname_mags = arg
elif opt == '-v':
verbosity = 1
print "\n\nFor usage help, run ./isingrand_esys.py -h\n\n"
print "Executing diagonalization with parameters:"
print "Lattice Size = " , L
print "Trasnverse field = " , alpha
print "\n\n ... Please Wait ...\n\n"
print "-----------------------------------"
print "Tfield |", "Gndst |" , "Mag avg |" , "time (s)"
print "-----------------------------------"
numstates = np.delete(np.arange(L+1),0)
limits = (-2**L/100,2**L)
start_time = time.time()
nprand.seed(seed)
hdata = np.array(nprand.rand(L))
jdata = np.array(nprand.rand(L))
H = np.zeros((2**L,2**L))
for i in numstates:
H = H - 2.0 * alpha * hdata[i-1] * nummat(i)
if(i < L-1):
H = H - jdata[i-1] * kemat(i)
evals, evecs = diagonalize(H)
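# Note (added comment): diagonalize() returns the scalar pair (0.0, 0.0) when
# the eigensolver fails, so the success check below tests the return types
# instead of calling .all on a float.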
if isinstance(evals, np.ndarray) and isinstance(evecs, np.ndarray):
#Compute magnetization diagonal components m_{aa}|<apsi_0>|^2
magop = np.zeros((2**L,2**L))
for i in numstates:
magop = magop + nummat(i)
#mag_aa is the array of m_aa = <a|M|a> for eigenvector |a>
mag_aa = np.diagonal(np.dot(np.transpose(evecs),np.dot(magop,evecs)))
#Assume that psi_0 is all spins up ie (1 0 0 0 ... 0)
#Then <a|psi_0> is just the complex conjugate of the first element of
#|a>
#So, the array of |<a|psi_0>|^2 is just the first row of evecs,
#mod-squared
diag_cc = np.abs(evecs[0,:])**2
mags = np.multiply(mag_aa,diag_cc)
np.set_printoptions(precision=3)
print "%2.3f | %2.3f | %2.3f | %2.3f" % (alpha, \
np.min(np.abs(evals.real)), np.sum(mags)/L ,time.time() - start_time)
print "-----------------------------------"
if(verbosity == 1):
#Plot the eigenvalues and eigenvectors and magnetization components
plt.plot(evals.real/L)
plt.title('Eigenvalues - Sorted')
plt.matshow(np.abs(evecs)**2,interpolation='nearest',\
cmap=cm.coolwarm)
plt.title('Eigenvectors - Sorted' )
plt.colorbar()
fig, ax = plt.subplots()
plt.bar(np.delete(np.arange(2**L+1),0),diag_cc,edgecolor='blue')
plt.xlim(limits)
plt.text(L,-np.max(diag_cc)/10.0,'Transverse field = %lf'%alpha,\
horizontalalignment='center')
#plt.title('Transverse field = %lf'%alpha )
plt.xscale('log')
ax.xaxis.tick_top()
ax.set_xlabel('Lexicographical order of eigenstate')
ax.xaxis.set_label_position('top')
ax.set_ylabel('Initial state probability wrt eigenstate')
plt.show()
print "\nDumping data to file" , out_fname_evals , " and " , \
out_fname_evecs , "..."
np.save(out_fname_evals,evals)
np.save(out_fname_evecs, evecs)
out_fname_mags = out_fname_mags % (alpha,L)
print "\nDumping outputs to files" , out_fname_mags , " and " ,\
out_fname_cc ,"..."
outdat = np.vstack((np.delete(np.arange(2**L+1),0),mags)).T
np.savetxt(out_fname_mags,outdat,delimiter=' ')
out_fname_cc = out_fname_cc % (alpha,L)
outdat = np.vstack((np.delete(np.arange(2**L+1),0),diag_cc)).T
np.savetxt(out_fname_cc,outdat,delimiter=' ')
print 'Done'
else:
print "Error! Eigenvalues did not converge for this parameter,\
skipping ..." | gpl-2.0 |
dvro/scikit-protopy | examples/plot_generation_example.py | 1 | 4410 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================
Prototype Selection and Generation Comparison
==============================================
A comparison of several prototype selection and generation algorithms from
this project on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
after applying instance reduction techniques.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
In particular, in high-dimensional spaces data can more easily be separated
linearly, and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization.
The plots show training points in solid colors and testing points
semi-transparent.
The lower right shows:
- S: score on the training set.
- R: reduction ratio.
License: BSD 3 clause
"""
print(__doc__)
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from protopy.selection.enn import ENN
from protopy.selection.cnn import CNN
from protopy.selection.renn import RENN
from protopy.selection.allknn import AllKNN
from protopy.selection.tomek_links import TomekLinks
from protopy.generation.sgp import SGP, SGP2, ASGP
import utils
h = .02 # step size in the mesh
names = ["KNN", "SGP", "SGP2", "ASGP"]
r_min, r_mis = 0.15, 0.15
classifiers = [
KNeighborsClassifier(1),
SGP(r_min=r_min, r_mis=r_mis),
SGP2(r_min=r_min, r_mis=r_mis),
ASGP(r_min=r_min, r_mis=r_mis)]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = pl.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
X, y = utils.generate_imbalance(X, y, positive_label=1, ir=1.5)
# just plot the dataset first
cm = pl.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = pl.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm_bright)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = pl.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(np.array(X), np.array(y))
red = clf.reduction_ if hasattr(clf, 'reduction_') else 0.0
if hasattr(clf, 'reduction_'):
X_prot, y_prot = clf.X_, clf.y_
else:
X_prot, y_prot = X, y
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the prototypes
ax.scatter(X_prot[:, 0], X_prot[:, 1], c=y_prot, cmap=cm_bright)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, 'R:' + ('%.2f' % red).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
pl.show()
| bsd-2-clause |
nidishrajendran/computational-politeness | model.py | 3 | 3241 |
import sys
import os
import cPickle
"""
This file provides an interface to
a pre-trained politeness SVM.
"""
#####
# Ensure the proper python dependencies exist
try:
import numpy as np
except:
sys.stderr.write("Package not found: Politeness model requires python package numpy\n")
sys.exit(2)
try:
import scipy
from scipy.sparse import csr_matrix
except:
sys.stderr.write("Package not found: Politeness model requires python package scipy\n")
sys.exit(2)
try:
import sklearn
except:
sys.stderr.write("Package not found: Politeness model requires python package scikit-learn\n")
sys.exit(2)
try:
import nltk
except:
sys.stderr.write("Package not found: Politeness model requires python package nltk\n")
sys.exit(2)
####
# Check versions for sklearn, scipy, numpy, nltk
# Don't error out, just notify
packages2versions = [("scikit-learn", sklearn, "0.15.1"), ("numpy", np, "1.9.0"), ("nltk", nltk, "3.0.0"), ("scipy", scipy, "0.12.0")]
for name, package, expected_v in packages2versions:
if package.__version__ < expected_v:
sys.stderr.write("Warning: package '%s', expected version >= %s, detected %s. Code functionality not guaranteed.\n" % (name, expected_v, package.__version__))
####
from features.vectorizer import PolitenessFeatureVectorizer
####
# Serialized model filename
MODEL_FILENAME = os.path.join(os.path.split(__file__)[0], 'politeness-svm.p')
####
# Load model, initialize vectorizer
clf = cPickle.load(open(MODEL_FILENAME))
vectorizer = PolitenessFeatureVectorizer()
def score(request):
"""
:param request - The request document to score
:type request - dict with 'sentences' and 'parses' field
sample (taken from test_documents.py)--
{
'sentences': [
"Have you found the answer for your question?",
"If yes would you please share it?"
],
'parses': [
["csubj(found-3, Have-1)", "dobj(Have-1, you-2)", "root(ROOT-0, found-3)", "det(answer-5, the-4)", "dobj(found-3, answer-5)", "poss(question-8, your-7)", "prep_for(found-3, question-8)"],
["prep_if(would-3, yes-2)", "root(ROOT-0, would-3)", "nsubj(would-3, you-4)", "ccomp(would-3, please-5)", "nsubj(it-7, share-6)", "xcomp(please-5, it-7)"]
]
}
returns class probabilities as a dict
{
'polite': float,
'impolite': float
}
"""
# vectorizer returns {feature-name: value} dict
features = vectorizer.features(request)
fv = [features[f] for f in sorted(features.iterkeys())]
# Single-row sparse matrix
X = csr_matrix(np.asarray([fv]))
probs = clf.predict_proba(X)
# Massage return format
probs = {"polite": probs[0][1], "impolite": probs[0][0]}
return probs
if __name__ == "__main__":
"""
Sample classification of requests
"""
from test_documents import TEST_DOCUMENTS
for doc in TEST_DOCUMENTS:
probs = score(doc)
print "===================="
print "Text: ", doc['text']
print "\tP(polite) = %.3f" % probs['polite']
print "\tP(impolite) = %.3f" % probs['impolite']
print "\n"
| apache-2.0 |
ryanraaum/african-mtdna | popdata_sources/tofanelli2009/process.py | 1 | 1397 | from oldowan.mtconvert import seq2sites, sites2seq, str2sites
from string import translate
import pandas as pd
import sys
import csv
sys.path.append('../../scripts')
from utils import *
## load metadata
metadata = pd.read_csv('metadata.csv', index_col=0)
region = range2region(metadata.ix[0, 'SeqRange'])
hids = []
groups = []
sites = []
with open('tofanelli2009.csv', 'rU') as f:
reader = csv.reader(f)
reader.next() # skip past header
for row in reader:
hids.append(row[0])
groups.append(row[1])
sites_str = ' '.join(row[3].split('-'))
sites.append(str2sites(sites_str, add16k=True))
## Validate
passed_validation = True
for i in range(len(sites)):
seq = sites2seq(sites[i], region)
mysites = seq2sites(seq)
if not mysites == sites[i]:
myseq = translate(sites2seq(mysites, region), None, '-')
if not seq == myseq:
passed_validation = False
print i, hids[i]
counter = {}
for k in metadata.index:
counter[k] = 0
if passed_validation:
with open('processed.csv', 'w') as f:
for i in range(len(groups)):
key = groups[i]
counter[key] = counter[key] + 1
newid = metadata.ix[key,'NewPrefix'] + str(counter[key]).zfill(3)
seq = sites2seq(sites[i], range2region(metadata.ix[key,'SeqRange']))
seq = translate(seq, None, '-')
mysites = seq2sites(seq)
mysites = ' '.join([str(x) for x in mysites])
f.write('%s,%s,%s\n' % (newid, hids[i], mysites)) | cc0-1.0 |
neuroidss/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_pdf.py | 69 | 71773 | # -*- coding: iso-8859-1 -*-
"""
A PDF matplotlib backend (not yet complete)
Author: Jouni K Seppänen <[email protected]>
"""
from __future__ import division
import os
import re
import sys
import time
import warnings
import zlib
import numpy as npy
from cStringIO import StringIO
from datetime import datetime
from math import ceil, cos, floor, pi, sin
try:
set
except NameError:
from sets import Set as set
import matplotlib
from matplotlib import __version__, rcParams, get_data_path
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import Bunch, is_string_like, reverse_dict, \
get_realpath_and_stat, is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.afm import AFM
import matplotlib.type1font as type1font
import matplotlib.dviread as dviread
from matplotlib.ft2font import FT2Font, FIXED_WIDTH, ITALIC, LOAD_NO_SCALE, \
LOAD_NO_HINTING, KERNING_UNFITTED
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D, Bbox, BboxBase
from matplotlib.path import Path
from matplotlib import ttconv
# Overview
#
# The low-level knowledge about pdf syntax lies mainly in the pdfRepr
# function and the classes Reference, Name, Operator, and Stream. The
# PdfFile class knows about the overall structure of pdf documents.
# It provides a "write" method for writing arbitrary strings in the
# file, and an "output" method that passes objects through the pdfRepr
# function before writing them in the file. The output method is
# called by the RendererPdf class, which contains the various draw_foo
# methods. RendererPdf contains a GraphicsContextPdf instance, and
# each draw_foo calls self.check_gc before outputting commands. This
# method checks whether the pdf graphics state needs to be modified
# and outputs the necessary commands. GraphicsContextPdf represents
# the graphics state, and its "delta" method returns the commands that
# modify the state.
# Add "pdf.use14corefonts: True" in your configuration file to use only
# the 14 PDF core fonts. These fonts do not need to be embedded; every
# PDF viewing application is required to have them. This results in very
# light PDF files you can use directly in LaTeX or ConTeXt documents
# generated with pdfTeX, without any conversion.
# These fonts are: Helvetica, Helvetica-Bold, Helvetica-Oblique,
# Helvetica-BoldOblique, Courier, Courier-Bold, Courier-Oblique,
# Courier-BoldOblique, Times-Roman, Times-Bold, Times-Italic,
# Times-BoldItalic, Symbol, ZapfDingbats.
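#
# Illustrative sketch (added for clarity; the figure content and output file
# name below are arbitrary): the same switch can also be flipped at run time
# through rcParams instead of the matplotlibrc file:
#
#     import matplotlib
#     matplotlib.rcParams['pdf.use14corefonts'] = True
#     matplotlib.use('pdf')
#     import matplotlib.pyplot as plt
#     plt.plot([0, 1], [0, 1])
#     plt.savefig('core_fonts_only.pdf')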
#
# Some tricky points:
#
# 1. The clip path can only be widened by popping from the state
# stack. Thus the state must be pushed onto the stack before narrowing
# the clip path. This is taken care of by GraphicsContextPdf.
#
# 2. Sometimes it is necessary to refer to something (e.g. font,
# image, or extended graphics state, which contains the alpha value)
# in the page stream by a name that needs to be defined outside the
# stream. PdfFile provides the methods fontName, imageObject, and
# alphaState for this purpose. The implementations of these methods
# should perhaps be generalized.
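#
# Illustrative sketch (added for clarity): because of point 1, a narrowed
# clip always ends up bracketed in the content stream roughly like
#
#     q                       % push graphics state (Op.gsave)
#     0 0 100 100 re W n      % narrow the clip to a rectangle
#     ... clipped drawing ...
#     Q                       % pop graphics state (Op.grestore)
#
# GraphicsContextPdf.clip_cmd emits the push before narrowing the clip, and
# its pop() later restores the previous clip path.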
# TODOs:
#
# * the alpha channel of images
# * image compression could be improved (PDF supports png-like compression)
# * encoding of fonts, including mathtext fonts and unicode support
# * Type 1 font support (i.e., "pdf.use_afm")
# * TTF support has lots of small TODOs, e.g. how do you know if a font
# is serif/sans-serif, or symbolic/non-symbolic?
# * draw_markers, draw_line_collection, etc.
# * use_tex
def fill(strings, linelen=75):
"""Make one string from sequence of strings, with whitespace
in between. The whitespace is chosen to form lines of at most
linelen characters, if possible."""
currpos = 0
lasti = 0
result = []
for i, s in enumerate(strings):
length = len(s)
if currpos + length < linelen:
currpos += length + 1
else:
result.append(' '.join(strings[lasti:i]))
lasti = i
currpos = length
result.append(' '.join(strings[lasti:]))
return '\n'.join(result)
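# Illustrative sketch (added for clarity; the helper below is an addition and
# is never called by the backend): fill() simply word-wraps a token sequence
# into lines of at most `linelen` characters.
def _fill_example():
    tokens = ['<<', '/Type /Page', '/Parent 3 0 R', '>>']
    # All four tokens fit within the default 75 characters, so they end up
    # on a single space-separated line.
    return fill(tokens) == '<< /Type /Page /Parent 3 0 R >>'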
_string_escape_regex = re.compile(r'([\\()])')
def pdfRepr(obj):
"""Map Python objects to PDF syntax."""
# Some objects defined later have their own pdfRepr method.
if hasattr(obj, 'pdfRepr'):
return obj.pdfRepr()
# Floats. PDF does not have exponential notation (1.0e-10) so we
# need to use %f with some precision. Perhaps the precision
# should adapt to the magnitude of the number?
elif isinstance(obj, float):
if not npy.isfinite(obj):
raise ValueError, "Can only output finite numbers in PDF"
r = "%.10f" % obj
return r.rstrip('0').rstrip('.')
    # Booleans need to be checked before integers, since bool is a
    # subclass of int and would otherwise be written out as 0 or 1.
    elif isinstance(obj, bool):
        return ['false', 'true'][obj]
    # Integers are written as such.
    elif isinstance(obj, (int, long)):
        return "%d" % obj
# Strings are written in parentheses, with backslashes and parens
# escaped. Actually balanced parens are allowed, but it is
# simpler to escape them all. TODO: cut long strings into lines;
# I believe there is some maximum line length in PDF.
elif is_string_like(obj):
return '(' + _string_escape_regex.sub(r'\\\1', obj) + ')'
# Dictionaries. The keys must be PDF names, so if we find strings
# there, we make Name objects from them. The values may be
# anything, so the caller must ensure that PDF names are
# represented as Name objects.
elif isinstance(obj, dict):
r = ["<<"]
r.extend(["%s %s" % (Name(key).pdfRepr(), pdfRepr(val))
for key, val in obj.items()])
r.append(">>")
return fill(r)
# Lists.
elif isinstance(obj, (list, tuple)):
r = ["["]
r.extend([pdfRepr(val) for val in obj])
r.append("]")
return fill(r)
# The null keyword.
elif obj is None:
return 'null'
# A date.
elif isinstance(obj, datetime):
r = obj.strftime('D:%Y%m%d%H%M%S')
if time.daylight: z = time.altzone
else: z = time.timezone
if z == 0: r += 'Z'
elif z < 0: r += "+%02d'%02d'" % ((-z)//3600, (-z)%3600)
else: r += "-%02d'%02d'" % (z//3600, z%3600)
return pdfRepr(r)
# A bounding box
elif isinstance(obj, BboxBase):
return fill([pdfRepr(val) for val in obj.bounds])
else:
raise TypeError, \
"Don't know a PDF representation for %s objects." \
% type(obj)
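# Illustrative sketch (added for clarity; the helper below is an addition and
# is never called by the backend): some concrete pdfRepr outputs for plain
# Python values.
def _pdfRepr_examples():
    assert pdfRepr(1.0) == '1'                  # no exponent, no trailing zeros
    assert pdfRepr(42) == '42'
    assert pdfRepr(None) == 'null'
    assert pdfRepr('a(b)') == '(a\\(b\\))'      # parentheses are escaped
    assert pdfRepr([1, 2, 3]) == '[ 1 2 3 ]'
    assert pdfRepr({'Type': Name('Page')}) == '<< /Type /Page >>'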
class Reference:
"""PDF reference object.
Use PdfFile.reserveObject() to create References.
"""
def __init__(self, id):
self.id = id
def __repr__(self):
return "<Reference %d>" % self.id
def pdfRepr(self):
return "%d 0 R" % self.id
def write(self, contents, file):
write = file.write
write("%d 0 obj\n" % self.id)
write(pdfRepr(contents))
write("\nendobj\n")
class Name:
"""PDF name object."""
_regex = re.compile(r'[^!-~]')
def __init__(self, name):
if isinstance(name, Name):
self.name = name.name
else:
self.name = self._regex.sub(Name.hexify, name)
def __repr__(self):
return "<Name %s>" % self.name
def hexify(match):
return '#%02x' % ord(match.group())
hexify = staticmethod(hexify)
def pdfRepr(self):
return '/' + self.name
class Operator:
"""PDF operator object."""
def __init__(self, op):
self.op = op
def __repr__(self):
return '<Operator %s>' % self.op
def pdfRepr(self):
return self.op
# PDF operators (not an exhaustive list)
_pdfops = dict(close_fill_stroke='b', fill_stroke='B', fill='f',
closepath='h', close_stroke='s', stroke='S', endpath='n',
begin_text='BT', end_text='ET',
curveto='c', rectangle='re', lineto='l', moveto='m',
concat_matrix='cm',
use_xobject='Do',
setgray_stroke='G', setgray_nonstroke='g',
setrgb_stroke='RG', setrgb_nonstroke='rg',
setcolorspace_stroke='CS', setcolorspace_nonstroke='cs',
setcolor_stroke='SCN', setcolor_nonstroke='scn',
setdash='d', setlinejoin='j', setlinecap='J', setgstate='gs',
gsave='q', grestore='Q',
textpos='Td', selectfont='Tf', textmatrix='Tm',
show='Tj', showkern='TJ',
setlinewidth='w', clip='W')
Op = Bunch(**dict([(name, Operator(value))
for name, value in _pdfops.items()]))
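# Illustrative sketch (added for clarity; the helper below is an addition and
# is never called by the backend): Name hexifies characters outside the
# printable '!'..'~' range, and Op members render as bare PDF operators.
def _name_and_op_examples():
    assert Name('DejaVu Sans').pdfRepr() == '/DejaVu#20Sans'   # space -> #20
    assert pdfRepr(Op.gsave) == 'q'
    assert pdfRepr([1, 0, 0, 1, 10, 20, Op.concat_matrix]) == '[ 1 0 0 1 10 20 cm ]'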
class Stream:
"""PDF stream object.
    This has no pdfRepr method. Instead, the stream header is written by
    the constructor; output the contents of the stream by calling write(),
    and finally call end().
"""
def __init__(self, id, len, file, extra=None):
"""id: object id of stream; len: an unused Reference object for the
length of the stream, or None (to use a memory buffer); file:
a PdfFile; extra: a dictionary of extra key-value pairs to
include in the stream header """
self.id = id # object id
self.len = len # id of length object
self.pdfFile = file
self.file = file.fh # file to which the stream is written
self.compressobj = None # compression object
if extra is None: self.extra = dict()
else: self.extra = extra
self.pdfFile.recordXref(self.id)
if rcParams['pdf.compression']:
self.compressobj = zlib.compressobj(rcParams['pdf.compression'])
if self.len is None:
self.file = StringIO()
else:
self._writeHeader()
self.pos = self.file.tell()
def _writeHeader(self):
write = self.file.write
write("%d 0 obj\n" % self.id)
dict = self.extra
dict['Length'] = self.len
if rcParams['pdf.compression']:
dict['Filter'] = Name('FlateDecode')
write(pdfRepr(dict))
write("\nstream\n")
def end(self):
"""Finalize stream."""
self._flush()
if self.len is None:
contents = self.file.getvalue()
self.len = len(contents)
self.file = self.pdfFile.fh
self._writeHeader()
self.file.write(contents)
self.file.write("\nendstream\nendobj\n")
else:
length = self.file.tell() - self.pos
self.file.write("\nendstream\nendobj\n")
self.pdfFile.writeObject(self.len, length)
def write(self, data):
"""Write some data on the stream."""
if self.compressobj is None:
self.file.write(data)
else:
compressed = self.compressobj.compress(data)
self.file.write(compressed)
def _flush(self):
"""Flush the compression object."""
if self.compressobj is not None:
compressed = self.compressobj.flush()
self.file.write(compressed)
self.compressobj = None
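# Illustrative sketch (added for clarity; the fragment below is hypothetical):
# a Stream is normally driven through PdfFile.beginStream / PdfFile.endStream
# rather than used directly, roughly like
#
#     length = pdf.reserveObject('length of example stream')
#     pdf.beginStream(pdf.reserveObject('example stream').id, length)
#     pdf.currentstream.write('...raw stream data...')
#     pdf.endStream()
#
# Note that beginStream asserts that no other stream is currently open.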
class PdfFile:
"""PDF file with one page."""
def __init__(self, width, height, dpi, filename):
self.width, self.height = width, height
self.dpi = dpi
if rcParams['path.simplify']:
self.simplify = (width * dpi, height * dpi)
else:
self.simplify = None
self.nextObject = 1 # next free object id
self.xrefTable = [ [0, 65535, 'the zero object'] ]
self.passed_in_file_object = False
if is_string_like(filename):
fh = file(filename, 'wb')
elif is_writable_file_like(filename):
fh = filename
self.passed_in_file_object = True
else:
raise ValueError("filename must be a path or a file-like object")
self.fh = fh
self.currentstream = None # stream object to write to, if any
fh.write("%PDF-1.4\n") # 1.4 is the first version to have alpha
# Output some eight-bit chars as a comment so various utilities
# recognize the file as binary by looking at the first few
# lines (see note in section 3.4.1 of the PDF reference).
fh.write("%\254\334 \253\272\n")
self.rootObject = self.reserveObject('root')
self.infoObject = self.reserveObject('info')
pagesObject = self.reserveObject('pages')
thePageObject = self.reserveObject('page 0')
contentObject = self.reserveObject('contents of page 0')
self.fontObject = self.reserveObject('fonts')
self.alphaStateObject = self.reserveObject('extended graphics states')
self.hatchObject = self.reserveObject('tiling patterns')
self.XObjectObject = self.reserveObject('external objects')
resourceObject = self.reserveObject('resources')
root = { 'Type': Name('Catalog'),
'Pages': pagesObject }
self.writeObject(self.rootObject, root)
info = { 'Creator': 'matplotlib ' + __version__ \
+ ', http://matplotlib.sf.net',
'Producer': 'matplotlib pdf backend',
'CreationDate': datetime.today() }
# Possible TODO: Title, Author, Subject, Keywords
self.writeObject(self.infoObject, info)
pages = { 'Type': Name('Pages'),
'Kids': [ thePageObject ],
'Count': 1 }
self.writeObject(pagesObject, pages)
thePage = { 'Type': Name('Page'),
'Parent': pagesObject,
'Resources': resourceObject,
'MediaBox': [ 0, 0, dpi*width, dpi*height ],
'Contents': contentObject }
self.writeObject(thePageObject, thePage)
# self.fontNames maps filenames to internal font names
self.fontNames = {}
self.nextFont = 1 # next free internal font name
self.fontInfo = {} # information on fonts: metrics, encoding
self.alphaStates = {} # maps alpha values to graphics state objects
self.nextAlphaState = 1
self.hatchPatterns = {}
self.nextHatch = 1
self.images = {}
self.nextImage = 1
self.markers = {}
self.multi_byte_charprocs = {}
# The PDF spec recommends to include every procset
procsets = [ Name(x)
for x in "PDF Text ImageB ImageC ImageI".split() ]
# Write resource dictionary.
# Possibly TODO: more general ExtGState (graphics state dictionaries)
# ColorSpace Pattern Shading Properties
resources = { 'Font': self.fontObject,
'XObject': self.XObjectObject,
'ExtGState': self.alphaStateObject,
'Pattern': self.hatchObject,
'ProcSet': procsets }
self.writeObject(resourceObject, resources)
# Start the content stream of the page
self.beginStream(contentObject.id,
self.reserveObject('length of content stream'))
def close(self):
# End the content stream and write out the various deferred
# objects
self.endStream()
self.writeFonts()
self.writeObject(self.alphaStateObject,
dict([(val[0], val[1])
for val in self.alphaStates.values()]))
self.writeHatches()
xobjects = dict(self.images.values())
for tup in self.markers.values():
xobjects[tup[0]] = tup[1]
for name, value in self.multi_byte_charprocs.items():
xobjects[name] = value
self.writeObject(self.XObjectObject, xobjects)
self.writeImages()
self.writeMarkers()
self.writeXref()
self.writeTrailer()
if self.passed_in_file_object:
self.fh.flush()
else:
self.fh.close()
def write(self, data):
if self.currentstream is None:
self.fh.write(data)
else:
self.currentstream.write(data)
def output(self, *data):
self.write(fill(map(pdfRepr, data)))
self.write('\n')
def beginStream(self, id, len, extra=None):
assert self.currentstream is None
self.currentstream = Stream(id, len, self, extra)
def endStream(self):
self.currentstream.end()
self.currentstream = None
def fontName(self, fontprop):
"""
Select a font based on fontprop and return a name suitable for
Op.selectfont. If fontprop is a string, it will be interpreted
as the filename of the font.
"""
if is_string_like(fontprop):
filename = fontprop
elif rcParams['pdf.use14corefonts']:
filename = findfont(fontprop, fontext='afm')
else:
filename = findfont(fontprop)
Fx = self.fontNames.get(filename)
if Fx is None:
Fx = Name('F%d' % self.nextFont)
self.fontNames[filename] = Fx
self.nextFont += 1
return Fx
def writeFonts(self):
fonts = {}
for filename, Fx in self.fontNames.items():
if filename.endswith('.afm'):
fontdictObject = self._write_afm_font(filename)
elif filename.endswith('.pfb') or filename.endswith('.pfa'):
# a Type 1 font; limited support for now
fontdictObject = self.embedType1(filename, self.fontInfo[Fx])
else:
realpath, stat_key = get_realpath_and_stat(filename)
chars = self.used_characters.get(stat_key)
if chars is not None and len(chars[1]):
fontdictObject = self.embedTTF(realpath, chars[1])
fonts[Fx] = fontdictObject
#print >>sys.stderr, filename
self.writeObject(self.fontObject, fonts)
def _write_afm_font(self, filename):
fh = file(filename)
font = AFM(fh)
fh.close()
fontname = font.get_fontname()
fontdict = { 'Type': Name('Font'),
'Subtype': Name('Type1'),
'BaseFont': Name(fontname),
'Encoding': Name('WinAnsiEncoding') }
fontdictObject = self.reserveObject('font dictionary')
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def embedType1(self, filename, fontinfo):
# TODO: font effects such as SlantFont
fh = open(filename, 'rb')
matplotlib.verbose.report(
'Embedding Type 1 font ' + filename, 'debug')
try:
fontdata = fh.read()
finally:
fh.close()
font = FT2Font(filename)
widthsObject, fontdescObject, fontdictObject, fontfileObject = \
[ self.reserveObject(n) for n in
('font widths', 'font descriptor',
'font dictionary', 'font file') ]
firstchar = 0
lastchar = len(fontinfo.widths) - 1
fontdict = {
'Type': Name('Font'),
'Subtype': Name('Type1'),
'BaseFont': Name(font.postscript_name),
'FirstChar': 0,
'LastChar': lastchar,
'Widths': widthsObject,
'FontDescriptor': fontdescObject,
}
if fontinfo.encodingfile is not None:
enc = dviread.Encoding(fontinfo.encodingfile)
differencesArray = [ Name(ch) for ch in enc ]
differencesArray = [ 0 ] + differencesArray
fontdict.update({
'Encoding': { 'Type': Name('Encoding'),
'Differences': differencesArray },
})
_, _, fullname, familyname, weight, italic_angle, fixed_pitch, \
ul_position, ul_thickness = font.get_ps_font_info()
flags = 0
if fixed_pitch: flags |= 1 << 0 # fixed width
if 0: flags |= 1 << 1 # TODO: serif
if 1: flags |= 1 << 2 # TODO: symbolic (most TeX fonts are)
else: flags |= 1 << 5 # non-symbolic
if italic_angle: flags |= 1 << 6 # italic
if 0: flags |= 1 << 16 # TODO: all caps
if 0: flags |= 1 << 17 # TODO: small caps
if 0: flags |= 1 << 18 # TODO: force bold
descriptor = {
'Type': Name('FontDescriptor'),
'FontName': Name(font.postscript_name),
'Flags': flags,
'FontBBox': font.bbox,
'ItalicAngle': italic_angle,
'Ascent': font.ascender,
'Descent': font.descender,
'CapHeight': 1000, # TODO: find this out
'XHeight': 500, # TODO: this one too
'FontFile': fontfileObject,
'FontFamily': familyname,
'StemV': 50, # TODO
# (see also revision 3874; but not all TeX distros have AFM files!)
#'FontWeight': a number where 400 = Regular, 700 = Bold
}
self.writeObject(fontdictObject, fontdict)
self.writeObject(widthsObject, fontinfo.widths)
self.writeObject(fontdescObject, descriptor)
t1font = type1font.Type1Font(filename)
self.beginStream(fontfileObject.id, None,
{ 'Length1': len(t1font.parts[0]),
'Length2': len(t1font.parts[1]),
'Length3': 0 })
self.currentstream.write(t1font.parts[0])
self.currentstream.write(t1font.parts[1])
self.endStream()
return fontdictObject
def _get_xobject_symbol_name(self, filename, symbol_name):
return "%s-%s" % (
os.path.splitext(os.path.basename(filename))[0],
symbol_name)
_identityToUnicodeCMap = """/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (Adobe)
/Ordering (UCS)
/Supplement 0
>> def
/CMapName /Adobe-Identity-UCS def
/CMapType 2 def
1 begincodespacerange
<0000> <ffff>
endcodespacerange
%d beginbfrange
%s
endbfrange
endcmap
CMapName currentdict /CMap defineresource pop
end
end"""
def embedTTF(self, filename, characters):
"""Embed the TTF font from the named file into the document."""
font = FT2Font(str(filename))
fonttype = rcParams['pdf.fonttype']
def cvt(length, upe=font.units_per_EM, nearest=True):
"Convert font coordinates to PDF glyph coordinates"
value = length / upe * 1000
if nearest: return round(value)
# Perhaps best to round away from zero for bounding
# boxes and the like
if value < 0: return floor(value)
else: return ceil(value)
def embedTTFType3(font, characters, descriptor):
"""The Type 3-specific part of embedding a Truetype font"""
widthsObject = self.reserveObject('font widths')
fontdescObject = self.reserveObject('font descriptor')
fontdictObject = self.reserveObject('font dictionary')
charprocsObject = self.reserveObject('character procs')
differencesArray = []
firstchar, lastchar = 0, 255
bbox = [cvt(x, nearest=False) for x in font.bbox]
fontdict = {
'Type' : Name('Font'),
'BaseFont' : ps_name,
'FirstChar' : firstchar,
'LastChar' : lastchar,
'FontDescriptor' : fontdescObject,
'Subtype' : Name('Type3'),
'Name' : descriptor['FontName'],
'FontBBox' : bbox,
'FontMatrix' : [ .001, 0, 0, .001, 0, 0 ],
'CharProcs' : charprocsObject,
'Encoding' : {
'Type' : Name('Encoding'),
'Differences' : differencesArray},
'Widths' : widthsObject
}
# Make the "Widths" array
from encodings import cp1252
# The "decoding_map" was changed to a "decoding_table" as of Python 2.5.
if hasattr(cp1252, 'decoding_map'):
def decode_char(charcode):
return cp1252.decoding_map[charcode] or 0
else:
def decode_char(charcode):
return ord(cp1252.decoding_table[charcode])
def get_char_width(charcode):
unicode = decode_char(charcode)
width = font.load_char(unicode, flags=LOAD_NO_SCALE|LOAD_NO_HINTING).horiAdvance
return cvt(width)
widths = [ get_char_width(charcode) for charcode in range(firstchar, lastchar+1) ]
descriptor['MaxWidth'] = max(widths)
# Make the "Differences" array, sort the ccodes < 255 from
# the multi-byte ccodes, and build the whole set of glyph ids
# that we need from this font.
cmap = font.get_charmap()
glyph_ids = []
differences = []
multi_byte_chars = set()
for c in characters:
ccode = c
gind = cmap.get(ccode) or 0
glyph_ids.append(gind)
glyph_name = font.get_glyph_name(gind)
if ccode <= 255:
differences.append((ccode, glyph_name))
else:
multi_byte_chars.add(glyph_name)
differences.sort()
last_c = -2
for c, name in differences:
if c != last_c + 1:
differencesArray.append(c)
differencesArray.append(Name(name))
last_c = c
# Make the charprocs array (using ttconv to generate the
# actual outlines)
rawcharprocs = ttconv.get_pdf_charprocs(filename, glyph_ids)
charprocs = {}
charprocsRef = {}
for charname, stream in rawcharprocs.items():
charprocDict = { 'Length': len(stream) }
# The 2-byte characters are used as XObjects, so they
# need extra info in their dictionary
if charname in multi_byte_chars:
charprocDict['Type'] = Name('XObject')
charprocDict['Subtype'] = Name('Form')
charprocDict['BBox'] = bbox
# Each glyph includes bounding box information,
# but xpdf and ghostscript can't handle it in a
# Form XObject (they segfault!!!), so we remove it
# from the stream here. It's not needed anyway,
# since the Form XObject includes it in its BBox
# value.
stream = stream[stream.find("d1") + 2:]
charprocObject = self.reserveObject('charProc')
self.beginStream(charprocObject.id, None, charprocDict)
self.currentstream.write(stream)
self.endStream()
# Send the glyphs with ccode > 255 to the XObject dictionary,
# and the others to the font itself
if charname in multi_byte_chars:
name = self._get_xobject_symbol_name(filename, charname)
self.multi_byte_charprocs[name] = charprocObject
else:
charprocs[charname] = charprocObject
# Write everything out
self.writeObject(fontdictObject, fontdict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(widthsObject, widths)
self.writeObject(charprocsObject, charprocs)
return fontdictObject
def embedTTFType42(font, characters, descriptor):
"""The Type 42-specific part of embedding a Truetype font"""
fontdescObject = self.reserveObject('font descriptor')
cidFontDictObject = self.reserveObject('CID font dictionary')
type0FontDictObject = self.reserveObject('Type 0 font dictionary')
cidToGidMapObject = self.reserveObject('CIDToGIDMap stream')
fontfileObject = self.reserveObject('font file stream')
wObject = self.reserveObject('Type 0 widths')
toUnicodeMapObject = self.reserveObject('ToUnicode map')
cidFontDict = {
'Type' : Name('Font'),
'Subtype' : Name('CIDFontType2'),
'BaseFont' : ps_name,
'CIDSystemInfo' : {
'Registry' : 'Adobe',
'Ordering' : 'Identity',
'Supplement' : 0 },
'FontDescriptor' : fontdescObject,
'W' : wObject,
'CIDToGIDMap' : cidToGidMapObject
}
type0FontDict = {
'Type' : Name('Font'),
'Subtype' : Name('Type0'),
'BaseFont' : ps_name,
'Encoding' : Name('Identity-H'),
'DescendantFonts' : [cidFontDictObject],
'ToUnicode' : toUnicodeMapObject
}
# Make fontfile stream
descriptor['FontFile2'] = fontfileObject
length1Object = self.reserveObject('decoded length of a font')
self.beginStream(
fontfileObject.id,
self.reserveObject('length of font stream'),
{'Length1': length1Object})
fontfile = open(filename, 'rb')
length1 = 0
while True:
data = fontfile.read(4096)
if not data: break
length1 += len(data)
self.currentstream.write(data)
fontfile.close()
self.endStream()
self.writeObject(length1Object, length1)
# Make the 'W' (Widths) array, CidToGidMap and ToUnicode CMap
# at the same time
cid_to_gid_map = [u'\u0000'] * 65536
cmap = font.get_charmap()
unicode_mapping = []
widths = []
max_ccode = 0
for c in characters:
ccode = c
gind = cmap.get(ccode) or 0
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
widths.append((ccode, glyph.horiAdvance / 6))
if ccode < 65536:
cid_to_gid_map[ccode] = unichr(gind)
max_ccode = max(ccode, max_ccode)
widths.sort()
cid_to_gid_map = cid_to_gid_map[:max_ccode + 1]
last_ccode = -2
w = []
max_width = 0
unicode_groups = []
for ccode, width in widths:
if ccode != last_ccode + 1:
w.append(ccode)
w.append([width])
unicode_groups.append([ccode, ccode])
else:
w[-1].append(width)
unicode_groups[-1][1] = ccode
max_width = max(max_width, width)
last_ccode = ccode
unicode_bfrange = []
for start, end in unicode_groups:
unicode_bfrange.append(
"<%04x> <%04x> [%s]" %
(start, end,
" ".join(["<%04x>" % x for x in range(start, end+1)])))
unicode_cmap = (self._identityToUnicodeCMap %
(len(unicode_groups),
"\n".join(unicode_bfrange)))
# CIDToGIDMap stream
cid_to_gid_map = "".join(cid_to_gid_map).encode("utf-16be")
self.beginStream(cidToGidMapObject.id,
None,
{'Length': len(cid_to_gid_map)})
self.currentstream.write(cid_to_gid_map)
self.endStream()
# ToUnicode CMap
self.beginStream(toUnicodeMapObject.id,
None,
{'Length': unicode_cmap})
self.currentstream.write(unicode_cmap)
self.endStream()
descriptor['MaxWidth'] = max_width
# Write everything out
self.writeObject(cidFontDictObject, cidFontDict)
self.writeObject(type0FontDictObject, type0FontDict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(wObject, w)
return type0FontDictObject
# Beginning of main embedTTF function...
# You are lost in a maze of TrueType tables, all different...
ps_name = Name(font.get_sfnt()[(1,0,0,6)])
pclt = font.get_sfnt_table('pclt') \
or { 'capHeight': 0, 'xHeight': 0 }
post = font.get_sfnt_table('post') \
or { 'italicAngle': (0,0) }
ff = font.face_flags
sf = font.style_flags
flags = 0
symbolic = False #ps_name.name in ('Cmsy10', 'Cmmi10', 'Cmex10')
if ff & FIXED_WIDTH: flags |= 1 << 0
if 0: flags |= 1 << 1 # TODO: serif
if symbolic: flags |= 1 << 2
else: flags |= 1 << 5
if sf & ITALIC: flags |= 1 << 6
if 0: flags |= 1 << 16 # TODO: all caps
if 0: flags |= 1 << 17 # TODO: small caps
if 0: flags |= 1 << 18 # TODO: force bold
descriptor = {
'Type' : Name('FontDescriptor'),
'FontName' : ps_name,
'Flags' : flags,
'FontBBox' : [ cvt(x, nearest=False) for x in font.bbox ],
'Ascent' : cvt(font.ascender, nearest=False),
'Descent' : cvt(font.descender, nearest=False),
'CapHeight' : cvt(pclt['capHeight'], nearest=False),
'XHeight' : cvt(pclt['xHeight']),
'ItalicAngle' : post['italicAngle'][1], # ???
'StemV' : 0 # ???
}
# The font subsetting to a Type 3 font does not work for
# OpenType (.otf) that embed a Postscript CFF font, so avoid that --
# save as a (non-subsetted) Type 42 font instead.
if is_opentype_cff_font(filename):
fonttype = 42
warnings.warn(("'%s' can not be subsetted into a Type 3 font. " +
"The entire font will be embedded in the output.") %
os.path.basename(filename))
if fonttype == 3:
return embedTTFType3(font, characters, descriptor)
elif fonttype == 42:
return embedTTFType42(font, characters, descriptor)
def alphaState(self, alpha):
"""Return name of an ExtGState that sets alpha to the given value"""
state = self.alphaStates.get(alpha, None)
if state is not None:
return state[0]
name = Name('A%d' % self.nextAlphaState)
self.nextAlphaState += 1
self.alphaStates[alpha] = \
(name, { 'Type': Name('ExtGState'),
'CA': alpha, 'ca': alpha })
return name
def hatchPattern(self, lst):
pattern = self.hatchPatterns.get(lst, None)
if pattern is not None:
return pattern[0]
name = Name('H%d' % self.nextHatch)
self.nextHatch += 1
self.hatchPatterns[lst] = name
return name
def writeHatches(self):
hatchDict = dict()
sidelen = 144.0
density = 24.0
for lst, name in self.hatchPatterns.items():
ob = self.reserveObject('hatch pattern')
hatchDict[name] = ob
res = { 'Procsets':
[ Name(x) for x in "PDF Text ImageB ImageC ImageI".split() ] }
self.beginStream(
ob.id, None,
{ 'Type': Name('Pattern'),
'PatternType': 1, 'PaintType': 1, 'TilingType': 1,
'BBox': [0, 0, sidelen, sidelen],
'XStep': sidelen, 'YStep': sidelen,
'Resources': res })
# lst is a tuple of stroke color, fill color,
# number of - lines, number of / lines,
# number of | lines, number of \ lines
rgb = lst[0]
self.output(rgb[0], rgb[1], rgb[2], Op.setrgb_stroke)
if lst[1] is not None:
rgb = lst[1]
self.output(rgb[0], rgb[1], rgb[2], Op.setrgb_nonstroke,
0, 0, sidelen, sidelen, Op.rectangle,
Op.fill)
if lst[2]: # -
for j in npy.arange(0.0, sidelen, density/lst[2]):
self.output(0, j, Op.moveto,
sidelen, j, Op.lineto)
if lst[3]: # /
for j in npy.arange(0.0, sidelen, density/lst[3]):
self.output(0, j, Op.moveto,
sidelen-j, sidelen, Op.lineto,
sidelen-j, 0, Op.moveto,
sidelen, j, Op.lineto)
if lst[4]: # |
for j in npy.arange(0.0, sidelen, density/lst[4]):
self.output(j, 0, Op.moveto,
j, sidelen, Op.lineto)
if lst[5]: # \
for j in npy.arange(sidelen, 0.0, -density/lst[5]):
self.output(sidelen, j, Op.moveto,
j, sidelen, Op.lineto,
j, 0, Op.moveto,
0, j, Op.lineto)
self.output(Op.stroke)
self.endStream()
self.writeObject(self.hatchObject, hatchDict)
def imageObject(self, image):
"""Return name of an image XObject representing the given image."""
pair = self.images.get(image, None)
if pair is not None:
return pair[0]
name = Name('I%d' % self.nextImage)
ob = self.reserveObject('image %d' % self.nextImage)
self.nextImage += 1
self.images[image] = (name, ob)
return name
## These two from backend_ps.py
## TODO: alpha (SMask, p. 518 of pdf spec)
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = npy.fromstring(s, npy.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[:,:,:3]
a = rgba[:,:,3:]
return h, w, rgb.tostring(), a.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = npy.fromstring(rgbat[2], npy.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba_f = rgba.astype(npy.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(npy.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def writeImages(self):
for img, pair in self.images.items():
img.flipud_out()
if img.is_grayscale:
height, width, data = self._gray(img)
self.beginStream(
pair[1].id,
self.reserveObject('length of image stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceGray'), 'BitsPerComponent': 8 })
self.currentstream.write(data) # TODO: predictors (i.e., output png)
self.endStream()
else:
height, width, data, adata = self._rgb(img)
smaskObject = self.reserveObject("smask")
stream = self.beginStream(
smaskObject.id,
self.reserveObject('length of smask stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceGray'), 'BitsPerComponent': 8 })
self.currentstream.write(adata) # TODO: predictors (i.e., output png)
self.endStream()
self.beginStream(
pair[1].id,
self.reserveObject('length of image stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceRGB'), 'BitsPerComponent': 8,
'SMask': smaskObject})
self.currentstream.write(data) # TODO: predictors (i.e., output png)
self.endStream()
img.flipud_out()
def markerObject(self, path, trans, fillp, lw):
"""Return name of a marker XObject representing the given path."""
key = (path, trans, fillp is not None, lw)
result = self.markers.get(key)
if result is None:
name = Name('M%d' % len(self.markers))
ob = self.reserveObject('marker %d' % len(self.markers))
self.markers[key] = (name, ob, path, trans, fillp, lw)
else:
name = result[0]
return name
def writeMarkers(self):
for tup in self.markers.values():
name, object, path, trans, fillp, lw = tup
bbox = path.get_extents(trans)
bbox = bbox.padded(lw * 0.5)
self.beginStream(
object.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': list(bbox.extents) })
self.writePath(path, trans)
if fillp:
self.output(Op.fill_stroke)
else:
self.output(Op.stroke)
self.endStream()
#@staticmethod
def pathOperations(path, transform, simplify=None):
tpath = transform.transform_path(path)
cmds = []
last_points = None
for points, code in tpath.iter_segments(simplify):
if code == Path.MOVETO:
cmds.extend(points)
cmds.append(Op.moveto)
elif code == Path.LINETO:
cmds.extend(points)
cmds.append(Op.lineto)
elif code == Path.CURVE3:
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
cmds.extend(points[2:])
cmds.append(Op.curveto)
elif code == Path.CURVE4:
cmds.extend(points)
cmds.append(Op.curveto)
elif code == Path.CLOSEPOLY:
cmds.append(Op.closepath)
last_points = points
return cmds
pathOperations = staticmethod(pathOperations)
def writePath(self, path, transform):
cmds = self.pathOperations(
path, transform, self.simplify)
self.output(*cmds)
def reserveObject(self, name=''):
"""Reserve an ID for an indirect object.
The name is used for debugging in case we forget to print out
the object with writeObject.
"""
id = self.nextObject
self.nextObject += 1
self.xrefTable.append([None, 0, name])
return Reference(id)
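    # Illustrative sketch (added for clarity; the snippet below is
    # hypothetical): reserveObject/writeObject implement a forward-reference
    # pattern, e.g. a stream's /Length entry can point at an object whose
    # value is only written once the stream has been emitted and its byte
    # count is known:
    #
    #     lengthObject = self.reserveObject('length of content stream')
    #     ... emit the stream, counting the bytes written ...
    #     self.writeObject(lengthObject, nbytes)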
def recordXref(self, id):
self.xrefTable[id][0] = self.fh.tell()
def writeObject(self, object, contents):
self.recordXref(object.id)
object.write(contents, self)
def writeXref(self):
"""Write out the xref table."""
self.startxref = self.fh.tell()
self.write("xref\n0 %d\n" % self.nextObject)
i = 0
        broken = False
for offset, generation, name in self.xrefTable:
if offset is None:
print >>sys.stderr, \
'No offset for object %d (%s)' % (i, name)
                broken = True
else:
self.write("%010d %05d n \n" % (offset, generation))
i += 1
        if broken:
raise AssertionError, 'Indirect object does not exist'
def writeTrailer(self):
"""Write out the PDF trailer."""
self.write("trailer\n")
self.write(pdfRepr(
{'Size': self.nextObject,
'Root': self.rootObject,
'Info': self.infoObject }))
# Could add 'ID'
self.write("\nstartxref\n%d\n%%%%EOF\n" % self.startxref)
class RendererPdf(RendererBase):
truetype_font_cache = maxdict(50)
afm_font_cache = maxdict(50)
def __init__(self, file, dpi, image_dpi):
RendererBase.__init__(self)
self.file = file
self.gc = self.new_gc()
self.file.used_characters = self.used_characters = {}
self.mathtext_parser = MathTextParser("Pdf")
self.dpi = dpi
self.image_dpi = image_dpi
self.tex_font_map = None
def finalize(self):
self.file.output(*self.gc.finalize())
def check_gc(self, gc, fillcolor=None):
orig_fill = gc._fillcolor
gc._fillcolor = fillcolor
delta = self.gc.delta(gc)
if delta: self.file.output(*delta)
# Restore gc to avoid unwanted side effects
gc._fillcolor = orig_fill
def tex_font_mapping(self, texfont):
if self.tex_font_map is None:
self.tex_font_map = \
dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))
return self.tex_font_map[texfont]
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
if isinstance(font, (str, unicode)):
fname = font
else:
fname = font.fname
realpath, stat_key = get_realpath_and_stat(fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in other.items():
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def get_image_magnification(self):
return self.image_dpi/72.0
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
# MGDTODO: Support clippath here
gc = self.new_gc()
if bbox is not None:
gc.set_clip_rectangle(bbox)
self.check_gc(gc)
h, w = im.get_size_out()
h, w = 72.0*h/self.image_dpi, 72.0*w/self.image_dpi
imob = self.file.imageObject(im)
self.file.output(Op.gsave, w, 0, 0, h, x, y, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
def draw_path(self, gc, path, transform, rgbFace=None):
self.check_gc(gc, rgbFace)
stream = self.file.writePath(path, transform)
self.file.output(self.gc.paint())
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
self.check_gc(gc, rgbFace)
fillp = rgbFace is not None
output = self.file.output
marker = self.file.markerObject(
marker_path, marker_trans, fillp, self.gc._linewidth)
tpath = trans.transform_path(path)
output(Op.gsave)
lastx, lasty = 0, 0
for vertices, code in tpath.iter_segments():
if len(vertices):
x, y = vertices[-2:]
dx, dy = x - lastx, y - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix,
marker, Op.use_xobject)
lastx, lasty = x, y
output(Op.grestore)
def _setup_textpos(self, x, y, descent, angle, oldx=0, oldy=0, olddescent=0, oldangle=0):
if angle == oldangle == 0:
self.file.output(x - oldx, (y + descent) - (oldy + olddescent), Op.textpos)
else:
angle = angle / 180.0 * pi
self.file.output( cos(angle), sin(angle),
-sin(angle), cos(angle),
x, y, Op.textmatrix)
self.file.output(0, descent, Op.textpos)
def draw_mathtext(self, gc, x, y, s, prop, angle):
# TODO: fix positioning and encoding
width, height, descent, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
self.merge_used_characters(used_characters)
# When using Type 3 fonts, we can't use character codes higher
# than 255, so we use the "Do" command to render those
# instead.
global_fonttype = rcParams['pdf.fonttype']
# Set up a global transformation matrix for the whole math expression
a = angle / 180.0 * pi
self.file.output(Op.gsave)
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
prev_font = None, None
oldx, oldy = 0, 0
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 42 or num <= 255:
self._setup_textpos(ox, oy, 0, 0, oldx, oldy)
oldx, oldy = ox, oy
if (fontname, fontsize) != prev_font:
fontsize *= self.dpi/72.0
self.file.output(self.file.fontName(fontname), fontsize,
Op.selectfont)
prev_font = fontname, fontsize
self.file.output(self.encode_string(unichr(num), fonttype), Op.show)
self.file.output(Op.end_text)
# If using Type 3 fonts, render all of the multi-byte characters
# as XObjects using the 'Do' command.
if global_fonttype == 3:
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
fontsize *= self.dpi/72.0
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 3 and num > 255:
self.file.fontName(fontname)
self.file.output(Op.gsave,
0.001 * fontsize, 0,
0, 0.001 * fontsize,
ox, oy, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
fontname, symbol_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Draw any horizontal lines in the math layout
for ox, oy, width, height in rects:
self.file.output(Op.gsave, ox, oy, width, height,
Op.rectangle, Op.fill, Op.grestore)
# Pop off the global transformation
self.file.output(Op.grestore)
def draw_tex(self, gc, x, y, s, prop, angle):
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
dvifile = texmanager.make_dvi(s, fontsize)
dvi = dviread.Dvi(dvifile, self.dpi)
page = iter(dvi).next()
dvi.close()
# Gather font information and do some setup for combining
# characters into strings.
oldfont, seq = None, []
for x1, y1, dvifont, glyph, width in page.text:
if dvifont != oldfont:
psfont = self.tex_font_mapping(dvifont.texname)
pdfname = self.file.fontName(psfont.filename)
if self.file.fontInfo.get(pdfname, None) is None:
self.file.fontInfo[pdfname] = Bunch(
encodingfile=psfont.encoding,
widths=dvifont.widths,
dvifont=dvifont)
seq += [['font', pdfname, dvifont.size]]
oldfont = dvifont
seq += [['text', x1, y1, [chr(glyph)], x1+width]]
# Find consecutive text strings with constant x coordinate and
# combine into a sequence of strings and kerns, or just one
# string (if any kerns would be less than 0.1 points).
i, curx = 0, 0
while i < len(seq)-1:
elt, next = seq[i:i+2]
if elt[0] == next[0] == 'text' and elt[2] == next[2]:
offset = elt[4] - next[1]
if abs(offset) < 0.1:
elt[3][-1] += next[3][0]
elt[4] += next[4]-next[1]
else:
elt[3] += [offset*1000.0/dvifont.size, next[3][0]]
elt[4] = next[4]
del seq[i+1]
continue
i += 1
# Create a transform to map the dvi contents to the canvas.
mytrans = Affine2D().rotate_deg(angle).translate(x, y)
# Output the text.
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
curx, cury, oldx, oldy = 0, 0, 0, 0
for elt in seq:
if elt[0] == 'font':
self.file.output(elt[1], elt[2], Op.selectfont)
elif elt[0] == 'text':
curx, cury = mytrans.transform((elt[1], elt[2]))
self._setup_textpos(curx, cury, 0, angle, oldx, oldy)
oldx, oldy = curx, cury
if len(elt[3]) == 1:
self.file.output(elt[3][0], Op.show)
else:
self.file.output(elt[3], Op.showkern)
else:
assert False
self.file.output(Op.end_text)
# Then output the boxes (e.g. variable-length lines of square
# roots).
boxgc = self.new_gc()
boxgc.copy_properties(gc)
boxgc.set_linewidth(0)
pathops = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
for x1, y1, h, w in page.boxes:
path = Path([[x1, y1], [x1+w, y1], [x1+w, y1+h], [x1, y1+h],
[0,0]], pathops)
self.draw_path(boxgc, path, mytrans, gc._rgb)
def encode_string(self, s, fonttype):
if fonttype == 3:
return s.encode('cp1252', 'replace')
return s.encode('utf-16be', 'replace')
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
# TODO: combine consecutive texts into one BT/ET delimited section
# This function is rather complex, since there is no way to
# access characters of a Type 3 font with codes > 255. (Type
# 3 fonts can not have a CIDMap). Therefore, we break the
# string into chunks, where each chunk contains exclusively
# 1-byte or exclusively 2-byte characters, and output each
# chunk a separate command. 1-byte characters use the regular
# text show command (Tj), whereas 2-byte characters use the
# use XObject command (Do). If using Type 42 fonts, all of
# this complication is avoided, but of course, those fonts can
# not be subsetted.
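        # Illustrative sketch (added for clarity): with a Type 3 font the
        # unicode string u'Ab\u2603c' would be grouped by check_simple_method
        # below as [(1, 'Ab'), (2, u'\u2603'), (1, 'c')]; the 1-byte chunks
        # are shown with Tj and the 2-byte character with a Do XObject.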
self.check_gc(gc, gc._rgb)
if ismath: return self.draw_mathtext(gc, x, y, s, prop, angle)
fontsize = prop.get_size_in_points() * self.dpi/72.0
if rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h = font.get_str_bbox(s)
descent = -b * fontsize / 1000
fonttype = 42
else:
font = self._get_font_ttf(prop)
self.track_characters(font, s)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
descent = font.get_descent() / 64.0
fonttype = rcParams['pdf.fonttype']
# We can't subset all OpenType fonts, so switch to Type 42
# in that case.
if is_opentype_cff_font(font.fname):
fonttype = 42
def check_simple_method(s):
"""Determine if we should use the simple or woven method
to output this text, and chunks the string into 1-byte and
2-byte sections if necessary."""
use_simple_method = True
chunks = []
if not rcParams['pdf.use14corefonts']:
if fonttype == 3 and not isinstance(s, str) and len(s) != 0:
# Break the string into chunks where each chunk is either
# a string of chars <= 255, or a single character > 255.
s = unicode(s)
for c in s:
if ord(c) <= 255:
char_type = 1
else:
char_type = 2
if len(chunks) and chunks[-1][0] == char_type:
chunks[-1][1].append(c)
else:
chunks.append((char_type, [c]))
use_simple_method = (len(chunks) == 1
and chunks[-1][0] == 1)
return use_simple_method, chunks
def draw_text_simple():
"""Outputs text using the simple method."""
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
self._setup_textpos(x, y, descent, angle)
self.file.output(self.encode_string(s, fonttype), Op.show, Op.end_text)
def draw_text_woven(chunks):
"""Outputs text using the woven method, alternating
between chunks of 1-byte characters and 2-byte characters.
Only used for Type 3 fonts."""
chunks = [(a, ''.join(b)) for a, b in chunks]
cmap = font.get_charmap()
# Do the rotation and global translation as a single matrix
# concatenation up front
self.file.output(Op.gsave)
a = angle / 180.0 * pi
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
# Output all the 1-byte characters in a BT/ET group, then
# output all the 2-byte characters.
for mode in (1, 2):
newx = oldx = 0
olddescent = 0
# Output a 1-byte character chunk
if mode == 1:
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
for chunk_type, chunk in chunks:
if mode == 1 and chunk_type == 1:
self._setup_textpos(newx, 0, descent, 0, oldx, 0, olddescent, 0)
self.file.output(self.encode_string(chunk, fonttype), Op.show)
oldx = newx
olddescent = descent
lastgind = None
for c in chunk:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is not None:
if mode == 2 and chunk_type == 2:
glyph_name = font.get_glyph_name(gind)
self.file.output(Op.gsave)
self.file.output(0.001 * fontsize, 0,
0, 0.001 * fontsize,
newx, 0, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
font.fname, glyph_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Move the pointer based on the character width
# and kerning
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(
lastgind, gind, KERNING_UNFITTED)
else:
kern = 0
lastgind = gind
newx += kern/64.0 + glyph.linearHoriAdvance/65536.0
if mode == 1:
self.file.output(Op.end_text)
self.file.output(Op.grestore)
use_simple_method, chunks = check_simple_method(s)
if use_simple_method:
return draw_text_simple()
else:
return draw_text_woven(chunks)
def get_text_width_height_descent(self, s, prop, ismath):
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
dvifile = texmanager.make_dvi(s, fontsize)
dvi = dviread.Dvi(dvifile, self.dpi)
page = iter(dvi).next()
dvi.close()
# A total height (including the descent) needs to be returned.
return page.width, page.height+page.descent, page.descent
if ismath:
w, h, d, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
elif rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h, d = font.get_str_bbox_and_descent(s)
scale = prop.get_size_in_points()
w *= scale
h *= scale
d *= scale
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
scale = (1.0 / 64.0)
w *= scale
h *= scale
d = font.get_descent()
d *= scale
return w, h, d
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afm_font_cache.get(key)
if font is None:
filename = findfont(prop, fontext='afm')
font = self.afm_font_cache.get(filename)
if font is None:
fh = file(filename)
font = AFM(fh)
self.afm_font_cache[filename] = font
fh.close()
self.afm_font_cache[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.truetype_font_cache.get(key)
if font is None:
filename = findfont(prop)
font = self.truetype_font_cache.get(filename)
if font is None:
font = FT2Font(str(filename))
self.truetype_font_cache[filename] = font
self.truetype_font_cache[key] = font
font.clear()
font.set_size(prop.get_size_in_points(), self.dpi)
return font
def flipy(self):
return False
def get_canvas_width_height(self):
return self.file.width / self.dpi, self.file.height / self.dpi
def new_gc(self):
return GraphicsContextPdf(self.file)
class GraphicsContextPdf(GraphicsContextBase):
def __init__(self, file):
GraphicsContextBase.__init__(self)
self._fillcolor = (0.0, 0.0, 0.0)
self.file = file
self.parent = None
def __repr__(self):
d = dict(self.__dict__)
del d['file']
del d['parent']
        return repr(d)
def _strokep(self):
return (self._linewidth > 0 and self._alpha > 0 and
(len(self._rgb) <= 3 or self._rgb[3] != 0.0))
def _fillp(self):
return ((self._fillcolor is not None or self._hatch) and
(len(self._fillcolor) <= 3 or self._fillcolor[3] != 0.0))
def close_and_paint(self):
if self._strokep():
if self._fillp():
return Op.close_fill_stroke
else:
return Op.close_stroke
else:
if self._fillp():
return Op.fill
else:
return Op.endpath
def paint(self):
if self._strokep():
if self._fillp():
return Op.fill_stroke
else:
return Op.stroke
else:
if self._fillp():
return Op.fill
else:
return Op.endpath
capstyles = { 'butt': 0, 'round': 1, 'projecting': 2 }
joinstyles = { 'miter': 0, 'round': 1, 'bevel': 2 }
def capstyle_cmd(self, style):
return [self.capstyles[style], Op.setlinecap]
def joinstyle_cmd(self, style):
return [self.joinstyles[style], Op.setlinejoin]
def linewidth_cmd(self, width):
return [width, Op.setlinewidth]
def dash_cmd(self, dashes):
offset, dash = dashes
if dash is None:
dash = []
offset = 0
return [list(dash), offset, Op.setdash]
def alpha_cmd(self, alpha):
name = self.file.alphaState(alpha)
return [name, Op.setgstate]
def hatch_cmd(self, hatch):
if not hatch:
if self._fillcolor is not None:
return self.fillcolor_cmd(self._fillcolor)
else:
return [Name('DeviceRGB'), Op.setcolorspace_nonstroke]
else:
hatch = hatch.lower()
lst = ( self._rgb,
self._fillcolor,
hatch.count('-') + hatch.count('+'),
hatch.count('/') + hatch.count('x'),
hatch.count('|') + hatch.count('+'),
hatch.count('\\') + hatch.count('x') )
name = self.file.hatchPattern(lst)
return [Name('Pattern'), Op.setcolorspace_nonstroke,
name, Op.setcolor_nonstroke]
def rgb_cmd(self, rgb):
if rcParams['pdf.inheritcolor']:
return []
if rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_stroke]
else:
return list(rgb[:3]) + [Op.setrgb_stroke]
def fillcolor_cmd(self, rgb):
if rgb is None or rcParams['pdf.inheritcolor']:
return []
elif rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_nonstroke]
else:
return list(rgb[:3]) + [Op.setrgb_nonstroke]
def push(self):
parent = GraphicsContextPdf(self.file)
parent.copy_properties(self)
parent.parent = self.parent
self.parent = parent
return [Op.gsave]
def pop(self):
assert self.parent is not None
self.copy_properties(self.parent)
self.parent = self.parent.parent
return [Op.grestore]
def clip_cmd(self, cliprect, clippath):
"""Set clip rectangle. Calls self.pop() and self.push()."""
cmds = []
# Pop graphics state until we hit the right one or the stack is empty
while (self._cliprect, self._clippath) != (cliprect, clippath) \
and self.parent is not None:
cmds.extend(self.pop())
# Unless we hit the right one, set the clip polygon
if (self._cliprect, self._clippath) != (cliprect, clippath):
cmds.extend(self.push())
if self._cliprect != cliprect:
cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])
if self._clippath != clippath:
cmds.extend(
PdfFile.pathOperations(
*clippath.get_transformed_path_and_affine()) +
[Op.clip, Op.endpath])
return cmds
commands = (
(('_cliprect', '_clippath'), clip_cmd), # must come first since may pop
(('_alpha',), alpha_cmd),
(('_capstyle',), capstyle_cmd),
(('_fillcolor',), fillcolor_cmd),
(('_joinstyle',), joinstyle_cmd),
(('_linewidth',), linewidth_cmd),
(('_dashes',), dash_cmd),
(('_rgb',), rgb_cmd),
(('_hatch',), hatch_cmd), # must come after fillcolor and rgb
)
# TODO: _linestyle
def delta(self, other):
"""
Copy properties of other into self and return PDF commands
needed to transform self into other.
"""
cmds = []
for params, cmd in self.commands:
different = False
for p in params:
ours = getattr(self, p)
theirs = getattr(other, p)
try:
different = bool(ours != theirs)
except ValueError:
ours = npy.asarray(ours)
theirs = npy.asarray(theirs)
different = ours.shape != theirs.shape or npy.any(ours != theirs)
if different:
break
if different:
theirs = [getattr(other, p) for p in params]
cmds.extend(cmd(self, *theirs))
for p in params:
setattr(self, p, getattr(other, p))
return cmds
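    # Illustrative sketch (added for clarity): if, say, only the line width
    # differs between self and other, delta() returns something like
    # [2.0, Op.setlinewidth] (hypothetical width), copies other's _linewidth
    # onto self, and produces no commands for the unchanged properties.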
def copy_properties(self, other):
"""
Copy properties of other into self.
"""
GraphicsContextBase.copy_properties(self, other)
self._fillcolor = other._fillcolor
def finalize(self):
"""
Make sure every pushed graphics state is popped.
"""
cmds = []
while self.parent is not None:
cmds.extend(self.pop())
return cmds
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasPdf(thisFig)
manager = FigureManagerPdf(canvas, num)
return manager
class FigureCanvasPdf(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def draw(self):
pass
filetypes = {'pdf': 'Portable Document Format'}
def get_default_filetype(self):
return 'pdf'
def print_pdf(self, filename, **kwargs):
ppi = 72 # Postscript points in an inch
image_dpi = kwargs.get('dpi', 72) # dpi to use for images
self.figure.set_dpi(ppi)
width, height = self.figure.get_size_inches()
file = PdfFile(width, height, ppi, filename)
renderer = MixedModeRenderer(
width, height, ppi, RendererPdf(file, ppi, image_dpi))
self.figure.draw(renderer)
renderer.finalize()
file.close()
class FigureManagerPdf(FigureManagerBase):
pass
FigureManager = FigureManagerPdf
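# Illustrative sketch (added for clarity; the helper and the default file name
# below are additions, never called by the backend): rendering an existing
# matplotlib.figure.Figure to a PDF file without going through pylab.
def _save_figure_to_pdf(figure, filename='example.pdf'):
    canvas = FigureCanvasPdf(figure)
    canvas.print_figure(filename)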
| agpl-3.0 |
fbagirov/scikit-learn | sklearn/tests/test_calibration.py | 213 | 12219 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
        # As the weights are used for the calibration, they should still
        # yield different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
        def softmax(y_pred):
            # Standard softmax over the OvR decision values: exponentiate the
            # raw scores and normalize each row to sum to 1.
            e = np.exp(y_pred)
            return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
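# Hedged illustration (helper added for exposition; not part of the original
# test suite): Platt scaling maps a decision value f to
# P(y=1 | f) = 1 / (1 + exp(A * f + B)). The (A, B) pair below reuses the
# LibSVM-derived constants from test_sigmoid_calibration above, and np is the
# module-level numpy import used throughout this file.
def _platt_sigmoid_example():
    A, B = -0.20261354391187855, 0.65236314980010512
    f = np.array([5.0, -4.0, 1.0])
    return 1. / (1. + np.exp(A * f + B))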
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
| bsd-3-clause |
rbalda/neural_ocr | env/lib/python2.7/site-packages/mpl_toolkits/axisartist/grid_finder.py | 7 | 11995 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
import matplotlib.cbook as mcbook
from matplotlib.transforms import Bbox
from . import clip_path
clip_line_to_rect = clip_path.clip_line_to_rect
import matplotlib.ticker as mticker
from matplotlib.transforms import Transform
# extremes finder
class ExtremeFinderSimple(object):
def __init__(self, nx, ny):
self.nx, self.ny = nx, ny
def __call__(self, transform_xy, x1, y1, x2, y2):
"""
get extreme values.
x1, y1, x2, y2 in image coordinates (0-based)
nx, ny : number of division in each axis
"""
x_, y_ = np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny)
x, y = np.meshgrid(x_, y_)
lon, lat = transform_xy(np.ravel(x), np.ravel(y))
lon_min, lon_max = lon.min(), lon.max()
lat_min, lat_max = lat.min(), lat.max()
return self._add_pad(lon_min, lon_max, lat_min, lat_max)
def _add_pad(self, lon_min, lon_max, lat_min, lat_max):
""" a small amount of padding is added because the current
clipping algorithms seems to fail when the gridline ends at
the bbox boundary.
"""
dlon = (lon_max - lon_min) / self.nx
dlat = (lat_max - lat_min) / self.ny
lon_min, lon_max = lon_min - dlon, lon_max + dlon
lat_min, lat_max = lat_min - dlat, lat_max + dlat
return lon_min, lon_max, lat_min, lat_max
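# Hedged usage sketch (helper added for exposition; not part of the original
# module): ExtremeFinderSimple samples an nx-by-ny grid of points in image
# coordinates, pushes them through transform_xy and returns padded
# (lon_min, lon_max, lat_min, lat_max) extremes.
def _extreme_finder_simple_example():
    def identity(x, y):  # stand-in transform for the demo
        return x, y

    finder = ExtremeFinderSimple(nx=3, ny=3)
    # Padded extremes around the unit square: roughly (-1/3, 4/3, -1/3, 4/3).
    return finder(identity, 0., 0., 1., 1.)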
class GridFinderBase(object):
def __init__(self,
extreme_finder,
grid_locator1,
grid_locator2,
tick_formatter1=None,
tick_formatter2=None):
"""
the transData of the axes to the world coordinate.
locator1, locator2 : grid locator for 1st and 2nd axis.
Derived must define "transform_xy, inv_transform_xy"
(may use update_transform)
"""
super(GridFinderBase, self).__init__()
self.extreme_finder = extreme_finder
self.grid_locator1 = grid_locator1
self.grid_locator2 = grid_locator2
self.tick_formatter1 = tick_formatter1
self.tick_formatter2 = tick_formatter2
def get_grid_info(self,
x1, y1, x2, y2):
"""
lon_values, lat_values : list of grid values. if integer is given,
rough number of grids in each direction.
"""
extremes = self.extreme_finder(self.inv_transform_xy, x1, y1, x2, y2)
        # min & max range of lat (or lon) over which each grid line will be
        # drawn; e.g., the gridline of lon=0 will be drawn from lat_min to
        # lat_max.
lon_min, lon_max, lat_min, lat_max = extremes
lon_levs, lon_n, lon_factor = \
self.grid_locator1(lon_min, lon_max)
lat_levs, lat_n, lat_factor = \
self.grid_locator2(lat_min, lat_max)
if lon_factor is None:
lon_values = np.asarray(lon_levs[:lon_n])
else:
lon_values = np.asarray(lon_levs[:lon_n]/lon_factor)
if lat_factor is None:
lat_values = np.asarray(lat_levs[:lat_n])
else:
lat_values = np.asarray(lat_levs[:lat_n]/lat_factor)
lon_lines, lat_lines = self._get_raw_grid_lines(lon_values,
lat_values,
lon_min, lon_max,
lat_min, lat_max)
ddx = (x2-x1)*1.e-10
ddy = (y2-y1)*1.e-10
bb = Bbox.from_extents(x1-ddx, y1-ddy, x2+ddx, y2+ddy)
grid_info = {}
grid_info["extremes"] = extremes
grid_info["lon_lines"] = lon_lines
grid_info["lat_lines"] = lat_lines
grid_info["lon"] = self._clip_grid_lines_and_find_ticks(lon_lines,
lon_values,
lon_levs,
bb)
grid_info["lat"] = self._clip_grid_lines_and_find_ticks(lat_lines,
lat_values,
lat_levs,
bb)
tck_labels = grid_info["lon"]["tick_labels"] = dict()
for direction in ["left", "bottom", "right", "top"]:
levs = grid_info["lon"]["tick_levels"][direction]
tck_labels[direction] = self.tick_formatter1(direction,
lon_factor, levs)
tck_labels = grid_info["lat"]["tick_labels"] = dict()
for direction in ["left", "bottom", "right", "top"]:
levs = grid_info["lat"]["tick_levels"][direction]
tck_labels[direction] = self.tick_formatter2(direction,
lat_factor, levs)
return grid_info
def _get_raw_grid_lines(self,
lon_values, lat_values,
lon_min, lon_max, lat_min, lat_max):
lons_i = np.linspace(lon_min, lon_max, 100) # for interpolation
lats_i = np.linspace(lat_min, lat_max, 100)
lon_lines = [self.transform_xy(np.zeros_like(lats_i)+lon, lats_i) \
for lon in lon_values]
lat_lines = [self.transform_xy(lons_i, np.zeros_like(lons_i)+lat) \
for lat in lat_values]
return lon_lines, lat_lines
def _clip_grid_lines_and_find_ticks(self, lines, values, levs, bb):
gi = dict()
gi["values"] = []
gi["levels"] = []
gi["tick_levels"] = dict(left=[], bottom=[], right=[], top=[])
gi["tick_locs"] = dict(left=[], bottom=[], right=[], top=[])
gi["lines"] = []
tck_levels = gi["tick_levels"]
tck_locs = gi["tick_locs"]
for (lx, ly), v, lev in zip(lines, values, levs):
xy, tcks = clip_line_to_rect(lx, ly, bb)
if not xy:
continue
gi["levels"].append(v)
gi["lines"].append(xy)
for tck, direction in zip(tcks, ["left", "bottom", "right", "top"]):
for t in tck:
tck_levels[direction].append(lev)
tck_locs[direction].append(t)
return gi
def update_transform(self, aux_trans):
if isinstance(aux_trans, Transform):
def transform_xy(x, y):
x, y = np.asarray(x), np.asarray(y)
ll1 = np.concatenate((x[:,np.newaxis], y[:,np.newaxis]), 1)
ll2 = aux_trans.transform(ll1)
lon, lat = ll2[:,0], ll2[:,1]
return lon, lat
def inv_transform_xy(x, y):
x, y = np.asarray(x), np.asarray(y)
ll1 = np.concatenate((x[:,np.newaxis], y[:,np.newaxis]), 1)
ll2 = aux_trans.inverted().transform(ll1)
lon, lat = ll2[:,0], ll2[:,1]
return lon, lat
else:
transform_xy, inv_transform_xy = aux_trans
self.transform_xy = transform_xy
self.inv_transform_xy = inv_transform_xy
def update(self, **kw):
for k in kw:
if k in ["extreme_finder",
"grid_locator1",
"grid_locator2",
"tick_formatter1",
"tick_formatter2"]:
setattr(self, k, kw[k])
else:
raise ValueError("unknown update property '%s'" % k)
class GridFinder(GridFinderBase):
def __init__(self,
transform,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
transform : transform from the image coordinate (which will be
the transData of the axes to the world coordinate.
or transform = (transform_xy, inv_transform_xy)
locator1, locator2 : grid locator for 1st and 2nd axis.
"""
if extreme_finder is None:
extreme_finder = ExtremeFinderSimple(20, 20)
if grid_locator1 is None:
grid_locator1 = MaxNLocator()
if grid_locator2 is None:
grid_locator2 = MaxNLocator()
if tick_formatter1 is None:
tick_formatter1 = FormatterPrettyPrint()
if tick_formatter2 is None:
tick_formatter2 = FormatterPrettyPrint()
super(GridFinder, self).__init__( \
extreme_finder,
grid_locator1,
grid_locator2,
tick_formatter1,
tick_formatter2)
self.update_transform(transform)
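# Hedged sketch (helper added for exposition; not part of the original
# module): a GridFinder built from a (forward, inverse) pair of callables,
# using the default locators and formatters defined later in this module.
# get_grid_info returns a dict with "extremes", "lon_lines", "lat_lines",
# "lon" and "lat" entries describing the clipped grid lines and the tick
# positions/labels on each side of the rectangle.
def _grid_finder_example():
    def identity(x, y):
        return np.asarray(x), np.asarray(y)

    grid_finder = GridFinder((identity, identity))
    info = grid_finder.get_grid_info(0., 0., 1., 1.)
    return sorted(info.keys())  # ['extremes', 'lat', 'lat_lines', 'lon', 'lon_lines']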
class MaxNLocator(mticker.MaxNLocator):
def __init__(self, nbins = 10, steps = None,
trim = True,
integer=False,
symmetric=False,
prune=None):
mticker.MaxNLocator.__init__(self, nbins, steps=steps,
trim=trim, integer=integer,
symmetric=symmetric, prune=prune)
self.create_dummy_axis()
self._factor = None
def __call__(self, v1, v2):
if self._factor is not None:
self.set_bounds(v1*self._factor, v2*self._factor)
locs = mticker.MaxNLocator.__call__(self)
return np.array(locs), len(locs), self._factor
else:
self.set_bounds(v1, v2)
locs = mticker.MaxNLocator.__call__(self)
return np.array(locs), len(locs), None
def set_factor(self, f):
self._factor = f
class FixedLocator(object):
def __init__(self, locs):
self._locs = locs
self._factor = None
def __call__(self, v1, v2):
if self._factor is None:
v1, v2 = sorted([v1, v2])
else:
v1, v2 = sorted([v1*self._factor, v2*self._factor])
locs = np.array([l for l in self._locs if ((v1 <= l) and (l <= v2))])
return locs, len(locs), self._factor
def set_factor(self, f):
self._factor = f
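# Hedged sketch (helper added for exposition; not part of the original
# module): like the MaxNLocator wrapper above, FixedLocator returns a
# (locations, count, factor) triple, keeping only the fixed locations that
# fall inside the requested interval.
def _fixed_locator_example():
    locator = FixedLocator([0, 15, 30, 45, 60, 75, 90])
    return locator(10, 50)  # -> (array([15, 30, 45]), 3, None)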
# Tick Formatter
class FormatterPrettyPrint(object):
def __init__(self, useMathText=True):
self._fmt = mticker.ScalarFormatter(useMathText=useMathText, useOffset=False)
self._fmt.create_dummy_axis()
self._ignore_factor = True
def __call__(self, direction, factor, values):
if not self._ignore_factor:
if factor is None:
factor = 1.
values = [v/factor for v in values]
#values = [v for v in values]
self._fmt.set_locs(values)
return [self._fmt(v) for v in values]
class DictFormatter(object):
def __init__(self, format_dict, formatter=None):
"""
format_dict : dictionary for format strings to be used.
formatter : fall-back formatter
"""
super(DictFormatter, self).__init__()
self._format_dict = format_dict
self._fallback_formatter = formatter
def __call__(self, direction, factor, values):
"""
factor is ignored if value is found in the dictionary
"""
if self._fallback_formatter:
fallback_strings = self._fallback_formatter(direction, factor, values)
else:
fallback_strings = [""]*len(values)
r = [self._format_dict.get(k, v) for k, v in zip(values,
fallback_strings)]
return r
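# Hedged sketch (helper added for exposition; not part of the original
# module): DictFormatter looks tick values up in a dictionary and falls back
# to the wrapped formatter (here FormatterPrettyPrint) for anything not
# listed.
def _dict_formatter_example():
    formatter = DictFormatter({0: "origin", 90: "north pole"},
                              formatter=FormatterPrettyPrint())
    # 0 and 90 come from the dictionary; 45 falls back to the pretty-printer.
    return formatter("left", None, [0, 45, 90])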
if __name__ == "__main__":
locator = MaxNLocator()
locs, nloc, factor = locator(0, 100)
fmt = FormatterPrettyPrint()
print(fmt("left", None, locs))
| mit |
zymsys/sms-tools | lectures/09-Sound-description/plots-code/mfcc.py | 25 | 1103 | import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
mfcc = ess.MFCC(numberCoefficients = 12)
x = ess.MonoLoader(filename = '../../../sounds/speech-male.wav', sampleRate = fs)()
mfccs = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
mX = spectrum(window(frame))
mfcc_bands, mfcc_coeffs = mfcc(mX)
mfccs.append(mfcc_coeffs)
mfccs = np.array(mfccs)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (speech-male.wav)')
plt.subplot(2,1,2)
numFrames = int(mfccs[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.pcolormesh(frmTime, 1+np.arange(12), np.transpose(mfccs[:,1:]))
plt.ylabel('coefficients')
plt.title('MFCCs')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('mfcc.png')
plt.show()
| agpl-3.0 |