repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---
pbillerot/picsou | picsou_graph.py | 1 | 4269 | #!/usr/bin/env python
# -*- coding:Utf-8 -*-
"""
Secondary window
"""
from gi.repository import Gtk
# import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas
from matplotlib.backends.backend_gtk3 import NavigationToolbar2GTK3 as NavigationToolbar
import matplotlib.dates as mdates
from datetime import datetime
class PicsouGraph(Gtk.Window):
""" Fenêtre de test """
def __init__(self, parent, ptf_id):
Gtk.Window.__init__(self, title="myWindow")
# self.connect("destroy", lambda x: Gtk.main_quit())
self.config = parent.config
self.tools = parent.tools
self.parent = parent
ptfs = self.tools.sql_to_dict("""
SELECT * FROM PTF WHERE ptf_id = :id
""", {"id": ptf_id})
ptf = ptfs[0]
cours = self.tools.sql_to_dict("""
SELECT * FROM cours WHERE cours_ptf_id = :id order by cours_date
""", {"id": ptf_id})
self.set_title("Cours de " + ptf_id + " - " + ptf["ptf_name"])
self.activate_focus()
self.set_border_width(10)
self.set_default_size(1200, 800)
""" Debut matplotlib """
cours_dates = []
cours_quotes = []
cours_ema12 = []
cours_ema26 = []
cours_ema50 = []
cours_trade = []
cours_ppp = []
cours_volume = []
cours_rsi = []
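        # Build the series used for plotting: closing price, 12/26/50-day EMAs,
        # simulated trade markers, closing prices from the portfolio date onward, volume and RSI.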
for cour in cours:
dt = datetime.strptime(cour["cours_date"], '%Y-%m-%d')
cours_dates.append(dt)
cours_quotes.append(float(cour["cours_close"]))
cours_ema12.append(float(cour["cours_ema12"]))
cours_ema26.append(float(cour["cours_ema26"]))
cours_ema50.append(float(cour["cours_ema50"]))
if cour["cours_trade"] in ('SSS', 'TTT', 'RRR'):
cours_trade.append(float(cour["cours_close"]))
else:
cours_trade.append(None)
if ptf["ptf_date"] != "" and cour["cours_date"] >= ptf["ptf_date"]:
cours_ppp.append(float(cour["cours_close"]))
else:
cours_ppp.append(None)
cours_volume.append(float(cour["cours_volume"]))
cours_rsi.append(float(cour["cours_rsi"]))
# plt.style.use('seaborn-paper')
# fig = plt.figure()
# ax1 = plt.subplot(111)
fig, ax1 = plt.subplots()
ax1.plot(cours_dates, cours_quotes, 'o-', label='Cours')
ax1.plot(cours_dates, cours_ema12, '-', label='EMA 12')
ax1.plot(cours_dates, cours_ema26, '-', label='EMA 26')
ax1.plot(cours_dates, cours_ema50, '-', label='EMA 50')
ax1.plot(cours_dates, cours_trade, 'o-', label='Simul', linewidth=2)
        ax1.plot(cours_dates, cours_ppp, 'o-', label=u'Réel', linewidth=2)  # u'' literal keeps the label valid on both Python 2 and 3
ax1.set_ylabel('Cours (Euro)')
ax1.set_xlabel('Date')
ax1.legend(loc=3)
# format the ticks
# days = mdates.DayLocator()
# daysFmt = mdates.DateFormatter('%d')
# months = mdates.MonthLocator()
# monthsFmt = mdates.DateFormatter('%Y-%m')
# ax1.xaxis.set_major_locator(months)
# ax1.xaxis.set_major_formatter(monthsFmt)
# ax1.xaxis.set_minor_locator(days)
# ax1.xaxis.set_minor_formatter(daysFmt)
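        # Secondary y-axis sharing the same dates, used to overlay the RSI indicator.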
ax2 = ax1.twinx()
ax2.plot(cours_dates, cours_rsi, '--', label='RSI')
ax2.set_ylabel('RSI')
ax2.legend(loc=4)
fig.autofmt_xdate()
plt.suptitle("Cours de " + ptf_id + " - " + ptf["ptf_name"])
plt.grid()
# plt.savefig('dates-tutorial01.png')
# plt.show()
canvas = FigureCanvas(fig) # a Gtk.DrawingArea
canvas.set_size_request(800, 600)
vbox = Gtk.VBox()
self.add(vbox)
vbox.pack_start(canvas, True, True, 0)
toolbar = NavigationToolbar(canvas, self)
vbox.pack_end(toolbar, False, False, 0)
# sw = Gtk.ScrolledWindow()
# self.add(sw)
# # A scrolled window border goes outside the scrollbars and viewport
# sw.set_border_width(10)
# sw.add_with_viewport(canvas)
# self.add(Gtk.Label("This is another window"))
self.show_all() | mit |
gwaygenomics/pancancer | scripts/within_tissue_analysis.py | 1 | 2691 | """
Gregory Way 2017
PanCancer NF1/RAS Classifier
scripts/within_tissue_analysis.py
Usage: Run in command line
python within_tissue_analysis.py
with the following required flags:
--genes comma separated string of HUGO gene symbols
and the following optional flags:
--diseases comma separated string of disease types to include
--alphas comma separated string of alpha parameters to test
--l1_ratios comma separated string of l1 parameters to test
--alt_folder string indicating the location to save results
--remove_hyper if present, remove hypermutated tumors
Output:
Results of single tissue classifier run through pancancer_classifier.py
"""
import os
import subprocess
import argparse
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--genes',
help='Comma separated string of HUGO gene symbols')
parser.add_argument('-d', '--diseases', default='Auto',
help='Comma separated string of disease types to consider')
parser.add_argument('-a', '--alphas', default='0.1,0.15,0.2,0.5,0.8,1',
help='the alphas for parameter sweep')
parser.add_argument('-l', '--l1_ratios', default='0,0.1,0.15,0.18,0.2,0.3',
help='the l1 ratios for parameter sweep')
parser.add_argument('-v', '--remove_hyper', action='store_true',
help='Remove hypermutated samples')
parser.add_argument('-f', '--alt_folder', default='Auto',
help='location to save')
args = parser.parse_args()
# Load command arguments
genes = args.genes
diseases = args.diseases.split(',')
folder = args.alt_folder
alphas = args.alphas
l1_ratios = args.l1_ratios
remove_hyper = args.remove_hyper
base_folder = os.path.join('classifiers', 'within_disease',
genes.replace(',', '_'))
if args.diseases == 'Auto':  # compare the raw flag value; 'diseases' above has already been split into a list
sample_freeze_file = os.path.join('data', 'sample_freeze.tsv')
sample_freeze = pd.read_table(sample_freeze_file, index_col=0)
disease_types = sample_freeze['DISEASE'].unique().tolist()
else:
disease_types = diseases
# Loop over disease types
for acronym in disease_types:
print(acronym)
if folder == 'Auto':
alt_folder = os.path.join(base_folder, acronym)
else:
alt_folder = os.path.join(folder, acronym)
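    # Delegate the actual training to pancancer_classifier.py, one disease type per run.
    # The assembled call looks roughly like (hypothetical values):
    #   python scripts/pancancer_classifier.py --genes NF1,KRAS --diseases GBM --drop --copy_number
    #       --alphas ... --l1_ratios ... --alt_folder classifiers/within_disease/NF1_KRAS/GBM --shuffled --keep_intermediate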
command = ['python', os.path.join('scripts', 'pancancer_classifier.py'),
'--genes', genes, '--diseases', acronym, '--drop',
'--copy_number', '--alphas', alphas, '--l1_ratios', l1_ratios,
'--alt_folder', alt_folder, '--shuffled', '--keep_intermediate']
if remove_hyper:
command += ['--remove_hyper']
subprocess.call(command)
| bsd-3-clause |
tdhopper/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
'LMT': 'Lookheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
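# After the two scalings, entry (i, j) equals precision_ij / sqrt(precision_ii * precision_jj);
# its absolute value (used below) measures the strength of the partial correlation
# (the textbook definition of partial correlation carries an extra minus sign off-diagonal).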
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
chrsrds/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 1 | 33557 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
import joblib
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.random_projection import SparseRandomProjection
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from sklearn.preprocessing import FunctionTransformer
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(tol=1e-3),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super().fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, f)(X_test)
assert_array_almost_equal(sparse_results, dense_results)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super().fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_almost_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_almost_equal(sparse_results, dense_results)
class DummySizeEstimator(BaseEstimator):
    """Record the size and a hash of every training set seen by ``fit``."""
    def fit(self, X, y):
        self.training_size_ = X.shape[0]
        self.training_hash_ = joblib.hash(X)
        return self
def test_bootstrap_samples():
# Test that bootstrapping samples generate non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert (base_estimator.score(X_train, y_train) ==
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert (base_estimator.score(X_train, y_train) >
ensemble.score(X_train, y_train))
    # check that each sampling corresponds to a complete bootstrap resample.
# the size of each bootstrap should be the same as the input data but
# the data should be different (checked using the hash of the data).
ensemble = BaggingRegressor(base_estimator=DummySizeEstimator(),
bootstrap=True).fit(X_train, y_train)
training_hash = []
for estimator in ensemble.estimators_:
assert estimator.training_size_ == X_train.shape[0]
training_hash.append(estimator.training_hash_)
assert len(set(training_hash)) == len(training_hash)
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert boston.data.shape[1] == np.unique(features).shape[0]
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert boston.data.shape[1] > np.unique(features).shape[0]
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert abs(test_score - clf.oob_score_) < 0.1
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert abs(test_score - clf.oob_score_) < 0.1
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_almost_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert not hasattr(BaggingClassifier(base).fit(X, y), 'decision_function')
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
X_err = np.hstack((X_test, np.zeros((X_test.shape[0], 1))))
assert_raise_message(ValueError, "Number of features of the model "
"must match the input. Model n_features is {0} "
"and input n_features is {1} "
"".format(X_test.shape[1], X_err.shape[1]),
ensemble.decision_function, X_err)
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_, DecisionTreeClassifier)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_, DecisionTreeClassifier)
ensemble = BaggingClassifier(Perceptron(tol=1e-3),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_, Perceptron)
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_, DecisionTreeRegressor)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_, DecisionTreeRegressor)
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_, SVR)
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
assert isinstance(estimator[0].steps[-1][1].random_state, int)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert len(clf_ws) == n_estimators
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert (set([tree.random_state for tree in clf_ws]) ==
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
    # Test if a warm-started second fit with smaller n_estimators raises an error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
def test_oob_score_consistency():
# Make sure OOB scores are identical when random_state, estimator, and
# training data are fixed and fitting is done twice
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BaggingClassifier(KNeighborsClassifier(), max_samples=0.5,
max_features=0.5, oob_score=True,
random_state=1)
assert bagging.fit(X, y).oob_score_ == bagging.fit(X, y).oob_score_
def test_estimators_samples():
# Check that format of estimators_samples_ is correct and that results
# generated at fit time can be identically reproduced at a later time
# using data saved in object attributes.
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BaggingClassifier(LogisticRegression(), max_samples=0.5,
max_features=0.5, random_state=1,
bootstrap=False)
bagging.fit(X, y)
# Get relevant attributes
estimators_samples = bagging.estimators_samples_
estimators_features = bagging.estimators_features_
estimators = bagging.estimators_
# Test for correct formatting
assert len(estimators_samples) == len(estimators)
assert len(estimators_samples[0]) == len(X) // 2
assert estimators_samples[0].dtype.kind == 'i'
# Re-fit single estimator to test for consistent sampling
estimator_index = 0
estimator_samples = estimators_samples[estimator_index]
estimator_features = estimators_features[estimator_index]
estimator = estimators[estimator_index]
X_train = (X[estimator_samples])[:, estimator_features]
y_train = y[estimator_samples]
orig_coefs = estimator.coef_
estimator.fit(X_train, y_train)
new_coefs = estimator.coef_
assert_array_almost_equal(orig_coefs, new_coefs)
def test_estimators_samples_deterministic():
# This test is a regression test to check that with a random step
# (e.g. SparseRandomProjection) and a given random state, the results
# generated at fit time can be identically reproduced at a later time using
# data saved in object attributes. Check issue #9524 for full discussion.
iris = load_iris()
X, y = iris.data, iris.target
base_pipeline = make_pipeline(SparseRandomProjection(n_components=2),
LogisticRegression())
clf = BaggingClassifier(base_estimator=base_pipeline,
max_samples=0.5,
random_state=0)
clf.fit(X, y)
pipeline_estimator_coef = clf.estimators_[0].steps[-1][1].coef_.copy()
estimator = clf.estimators_[0]
estimator_sample = clf.estimators_samples_[0]
estimator_feature = clf.estimators_features_[0]
X_train = (X[estimator_sample])[:, estimator_feature]
y_train = y[estimator_sample]
estimator.fit(X_train, y_train)
assert_array_equal(estimator.steps[-1][1].coef_, pipeline_estimator_coef)
def test_max_samples_consistency():
# Make sure validated max_samples and original max_samples are identical
# when valid integer max_samples supplied by user
max_samples = 100
X, y = make_hastie_10_2(n_samples=2*max_samples, random_state=1)
bagging = BaggingClassifier(KNeighborsClassifier(),
max_samples=max_samples,
max_features=0.5, random_state=1)
bagging.fit(X, y)
assert bagging._max_samples == max_samples
def test_set_oob_score_label_encoding():
# Make sure the oob_score doesn't change when the labels change
# See: https://github.com/scikit-learn/scikit-learn/issues/8933
random_state = 5
X = [[-1], [0], [1]] * 5
Y1 = ['A', 'B', 'C'] * 5
Y2 = [-1, 0, 1] * 5
Y3 = [0, 1, 2] * 5
x1 = BaggingClassifier(oob_score=True,
random_state=random_state).fit(X, Y1).oob_score_
x2 = BaggingClassifier(oob_score=True,
random_state=random_state).fit(X, Y2).oob_score_
x3 = BaggingClassifier(oob_score=True,
random_state=random_state).fit(X, Y3).oob_score_
assert [x1, x2] == [x3, x3]
def replace(X):
X = X.astype('float', copy=True)
X[~np.isfinite(X)] = 0
return X
def test_bagging_regressor_with_missing_inputs():
# Check that BaggingRegressor can accept X with missing/infinite data
X = np.array([
[1, 3, 5],
[2, None, 6],
[2, np.nan, 6],
[2, np.inf, 6],
[2, np.NINF, 6],
])
y_values = [
np.array([2, 3, 3, 3, 3]),
np.array([
[2, 1, 9],
[3, 6, 8],
[3, 6, 8],
[3, 6, 8],
[3, 6, 8],
])
]
for y in y_values:
regressor = DecisionTreeRegressor()
pipeline = make_pipeline(
FunctionTransformer(replace), regressor
)
pipeline.fit(X, y).predict(X)
bagging_regressor = BaggingRegressor(pipeline)
y_hat = bagging_regressor.fit(X, y).predict(X)
assert y.shape == y_hat.shape
# Verify that exceptions can be raised by wrapper regressor
regressor = DecisionTreeRegressor()
pipeline = make_pipeline(regressor)
assert_raises(ValueError, pipeline.fit, X, y)
bagging_regressor = BaggingRegressor(pipeline)
assert_raises(ValueError, bagging_regressor.fit, X, y)
def test_bagging_classifier_with_missing_inputs():
# Check that BaggingClassifier can accept X with missing/infinite data
X = np.array([
[1, 3, 5],
[2, None, 6],
[2, np.nan, 6],
[2, np.inf, 6],
[2, np.NINF, 6],
])
y = np.array([3, 6, 6, 6, 6])
classifier = DecisionTreeClassifier()
pipeline = make_pipeline(
FunctionTransformer(replace), classifier
)
pipeline.fit(X, y).predict(X)
bagging_classifier = BaggingClassifier(pipeline)
bagging_classifier.fit(X, y)
y_hat = bagging_classifier.predict(X)
assert y.shape == y_hat.shape
bagging_classifier.predict_log_proba(X)
bagging_classifier.predict_proba(X)
# Verify that exceptions can be raised by wrapper classifier
classifier = DecisionTreeClassifier()
pipeline = make_pipeline(classifier)
assert_raises(ValueError, pipeline.fit, X, y)
bagging_classifier = BaggingClassifier(pipeline)
assert_raises(ValueError, bagging_classifier.fit, X, y)
def test_bagging_small_max_features():
# Check that Bagging estimator can accept low fractional max_features
X = np.array([[1, 2], [3, 4]])
y = np.array([1, 0])
bagging = BaggingClassifier(LogisticRegression(),
max_features=0.3, random_state=1)
bagging.fit(X, y)
| bsd-3-clause |
yugangzhang/chxanalys | chxanalys/SAXS.py | 1 | 21816 | """
Sep 10 Developed by Y.G.@CHX
[email protected]
This module is for static SAXS analysis, such as fitting the form factor
"""
#import numpy as np
#from lmfit import Model
#from lmfit import minimize, Parameters, Parameter, report_fit
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#from matplotlib.colors import LogNorm
from chxanalys.chx_libs import *
from chxanalys.chx_generic_functions import show_img, plot1D, find_index
from scipy.special import gamma, gammaln
from scipy.optimize import leastsq
def mono_sphere_form_factor_intensity( x, radius, delta_rho=100,fit_func='G'):
    '''
    Input:
        x/q: in A-1, array or a single value
        radius/R: in A
        delta_rho: Scattering Length Density (SLD) difference between the solvent and the scatterer, in A-2
    Output:
        The form factor intensity of a monodisperse spherical scatterer
    '''
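    # Sphere form factor intensity: P(q) = (delta_rho * V)^2 * [3 (sin(qR) - qR cos(qR)) / (qR)^3]^2;
    # the 36*pi*(delta_rho*V)^2 / (4*pi) prefactor below is the same expression with the factor 9 = 3^2
    # written with the solid-angle factor kept explicit.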
q=x
R=radius
qR= q * R
volume = (4.0/3.0)*np.pi*(R**3)
prefactor = 36*np.pi*( ( delta_rho * volume)**2 )/(4*np.pi)
P = ( np.sin(qR) - qR*np.cos(qR) )**2/( qR**6 )
P*=prefactor
P=P.real
return P
def gaussion( x, u, sigma):
return 1/( sigma*np.sqrt(2*np.pi) )* np.exp( - ((x-u)**2)/(2*( sigma**2 ) ) )
def Schultz_Zimm(x,u,sigma):
'''http://sasfit.ingobressler.net/manual/Schultz-Zimm
See also The size distribution of ‘gold standard’ nanoparticles
Anal Bioanal Chem (2009) 395:1651–1660
DOI 10.1007/s00216-009-3049-5
'''
k = 1.0/ (sigma)**2
return 1.0/u * (x/u)**(k-1) * k**k*np.exp( -k*x/u)/gamma(k)
def distribution_func( radius=1.0, sigma=0.1, num_points=20, spread=3, func='G'):
'''
radius: the central radius
sigma: sqrt root of variance in percent
'''
if 1 - spread* sigma<=0:
spread= (1 - sigma)/sigma -1
#print( num_points )
x, rs= np.linspace( radius - radius*spread* sigma, radius+radius*spread*sigma, num_points,retstep=True)
#print(x)
if func=='G':
func=gaussion
elif func=='S':
func= Schultz_Zimm
return x, rs, func( x, radius, radius*sigma)
def poly_sphere_form_factor_intensity( x, radius, sigma=0.1, delta_rho=100, background=0, num_points=20, spread=5,
fit_func='G'):
    '''
    Input:
        x/q: in A-1, array or a single value
        radius/R: in A
        sigma: relative polydispersity (standard deviation of the radius as a fraction of the radius)
        delta_rho: Scattering Length Density (SLD) difference between the solvent and the scatterer, in A-2
        fit_func: 'G': Gaussian; 'S': Schulz-Zimm (Flory-Schulz) distribution
    Output:
        The form factor intensity of a polydisperse spherical scatterer
    '''
q=x
R= radius
if not hasattr(q, '__iter__'):
q=np.array( [q] )
v = np.zeros( (len(q)) )
if sigma==0:
v= mono_sphere_form_factor_intensity( q, R, delta_rho)
else:
r, rs, wt = distribution_func( radius=R, sigma=sigma,
num_points=num_points, spread=spread, func=fit_func)
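        # Number-average the monodisperse intensity over the size distribution:
        # a Riemann sum with step rs and weights wt from the chosen distribution (Gaussian or Schulz-Zimm).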
for i, Ri in enumerate(r):
#print(Ri, wt[i],delta_rho, rs)
v += mono_sphere_form_factor_intensity( q, Ri, delta_rho)*wt[i]*rs
return v + background #* delta_rho
def poly_sphere_form_factor_intensity_q2( x, radius, sigma=0.1, delta_rho=1, fit_func='G'):#, scale=1, baseline=0):
    '''
    Input:
        x/q: in A-1, array or a single value
        radius/R: in A
        sigma: relative polydispersity (standard deviation of the radius as a fraction of the radius)
        delta_rho: Scattering Length Density (SLD) difference between the solvent and the scatterer, in A-2
    Output:
        The form factor intensity of a polydisperse spherical scatterer, multiplied by q**2
    '''
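    # The extra q**2 factor is a Kratky-style weighting that emphasizes the higher-q part of the curve in a fit.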
return poly_sphere_form_factor_intensity( x, radius, sigma, delta_rho, fit_func)*x**2 #* scale + baseline
def find_index_old( x,x0,tolerance= None):
    # find the position of x0 in the list x, within a tolerance
N=len(x)
i=0
position=None
if tolerance==None:
tolerance = (x[1]-x[0])/2.
if x0 > max(x):
position= len(x) -1
elif x0<min(x):
position=0
else:
for item in x:
if abs(item-x0)<=tolerance:
position=i
#print 'Found Index!!!'
break
i+=1
return position
def get_form_factor_fit( q, iq, guess_values, fit_range=None, fit_variables = None,function='poly_sphere',
fit_func='G',
*argv,**kwargs):
'''
Fit form factor for GUI
The support fitting functions include
poly_sphere (poly_sphere_form_factor_intensity),
mono_sphere (mono_sphere_form_factor_intensity)
Parameters
----------
q: q vector
iq: form factor
    guess_values: a dict, contains keys
    radius: the initial guess of the sphere central radius
    sigma: the initial guess of the relative polydispersity (std dev of the radius as a fraction of the radius)
    function:
    mono_sphere (mono_sphere_form_factor_intensity): fit by a monodisperse sphere model
    poly_sphere (poly_sphere_form_factor_intensity): fit by a polydisperse sphere model
    Returns
    -------
    fit results:
    radius
    sigma
    an example:
    result, q_fit = get_form_factor_fit( q, iq, guess_values, function='poly_sphere' )
'''
if function=='poly_sphere':
mod = Model(poly_sphere_form_factor_intensity)#_q2 )
elif function=='mono_sphere':
mod = Model( mono_sphere_form_factor_intensity )
else:
print ("The %s is not supported.The supported functions include poly_sphere and mono_sphere"%function)
if fit_range is not None:
x1,x2= fit_range
q1=find_index( q,x1,tolerance= None)
q2=find_index( q,x2,tolerance= None)
else:
q1=0
q2=len(q)
q_=q[q1:q2]
iq_ = iq[q1:q2]
_r= guess_values[ 'radius']
_sigma = guess_values['sigma']
_delta_rho= guess_values['delta_rho']
_background = guess_values['background']
#_scale = guess_values['scale']
#_baseline = guess_values['baseline']
mod.set_param_hint( 'radius', min= _r/10, max=_r*10 )
mod.set_param_hint( 'sigma', min= _sigma/10, max=_sigma*10 )
#mod.set_param_hint( 'scale', min= _scale/1E3, max= _scale*1E3 )
#mod.set_param_hint( 'baseline', min= 0 )
#mod.set_param_hint( 'delta_rho', min= 0 )
#mod.set_param_hint( 'delta_rho', min= _delta_rho/1E6, max= _delta_rho*1E6 )
pars = mod.make_params( radius= _r, sigma=_sigma,delta_rho=_delta_rho,background=_background)# scale= _scale, baseline =_baseline )
if fit_variables is not None:
for var in list( fit_variables.keys()):
pars[var].vary = fit_variables[var]
#pars['delta_rho'].vary =False
fit_power = 0
result = mod.fit( iq_* q_**fit_power, pars, x = q_)#, fit_func=fit_func )
if function=='poly_sphere':
sigma = result.best_values['sigma']
elif function=='mono_sphere':
sigma=0
r = result.best_values['radius']
#scale = result.best_values['scale']
#baseline = result.best_values['baseline']
delta_rho= result.best_values['delta_rho']
return result, q_
def plot_form_factor_with_fit(q, iq, q_, result, fit_power=2, res_pargs=None, return_fig=False,
*argv,**kwargs):
if res_pargs is not None:
uid = res_pargs['uid']
path = res_pargs['path']
else:
if 'uid' in kwargs.keys():
uid = kwargs['uid']
else:
uid = 'uid'
if 'path' in kwargs.keys():
path = kwargs['path']
else:
path = ''
#fig = Figure()
#ax = fig.add_subplot(111)
fig, ax = plt.subplots()
title_qr = 'form_factor_fit'
plt.title('uid= %s:--->'%uid + title_qr,fontsize=20, y =1.02)
r = result.best_values['radius']
delta_rho= result.best_values['delta_rho']
sigma = result.best_values['sigma']
ax.semilogy( q, iq, 'ro', label='Form Factor')
ax.semilogy( q_, result.best_fit/q_**fit_power, '-b', lw=3, label='Fit')
txts = r'radius' + r' = %.2f '%( r/10.) + r'$ nm$'
ax.text(x =0.02, y=.35, s=txts, fontsize=14, transform=ax.transAxes)
txts = r'sigma' + r' = %.3f'%( sigma)
#txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$'
ax.text(x =0.02, y=.25, s=txts, fontsize=14, transform=ax.transAxes)
#txts = r'delta_rho' + r' = %.3e'%( delta_rho)
#txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$'
#ax.text(x =0.02, y=.35, s=txts, fontsize=14, transform=ax.transAxes)
ax.legend( loc = 'best' )
if 'ylim' in kwargs:
ax.set_ylim( kwargs['ylim'])
elif 'vlim' in kwargs:
vmin, vmax =kwargs['vlim']
ax.set_ylim([min(y)*vmin, max(y[1:])*vmax ])
else:
pass
if 'xlim' in kwargs:
ax.set_xlim( kwargs['xlim'])
fp = path + 'uid=%s--form_factor--fit-'%(uid ) + '.png'
plt.savefig( fp, dpi=fig.dpi)
#fig.tight_layout()
plt.show()
if return_fig:
return fig
def fit_form_factor( q, iq, guess_values, fit_range=None, fit_variables = None, res_pargs=None,function='poly_sphere', fit_func='G', return_fig=False, *argv,**kwargs):
'''
Fit form factor
The support fitting functions include
poly_sphere (poly_sphere_form_factor_intensity),
mono_sphere (mono_sphere_form_factor_intensity)
Parameters
----------
q: q vector
iq: form factor
res_pargs: a dict, contains keys, such path, uid...
    guess_values: a dict, contains keys
    radius: the initial guess of the sphere central radius
    sigma: the initial guess of the relative polydispersity (std dev of the radius as a fraction of the radius)
    function:
    mono_sphere (mono_sphere_form_factor_intensity): fit by a monodisperse sphere model
    poly_sphere (poly_sphere_form_factor_intensity): fit by a polydisperse sphere model
    Returns
    -------
    fit results:
    radius
    sigma
    an example:
    result = fit_form_factor( q, iq, guess_values, res_pargs=None, function='poly_sphere' )
'''
result, q_ = get_form_factor_fit( q, iq, guess_values, fit_range=fit_range,
fit_variables = fit_variables,function=function, fit_func=fit_func )
plot_form_factor_with_fit(q, iq, q_, result, fit_power=0, res_pargs=res_pargs, return_fig=return_fig )
return result
def fit_form_factor2( q, iq, guess_values, fit_range=None, fit_variables = None, res_pargs=None,function='poly_sphere', fit_func='G',
*argv,**kwargs):
'''
Fit form factor
The support fitting functions include
poly_sphere (poly_sphere_form_factor_intensity),
mono_sphere (mono_sphere_form_factor_intensity)
Parameters
----------
q: q vector
iq: form factor
res_pargs: a dict, contains keys, such path, uid...
    guess_values: a dict, contains keys
    radius: the initial guess of the sphere central radius
    sigma: the initial guess of the relative polydispersity (std dev of the radius as a fraction of the radius)
    function:
    mono_sphere (mono_sphere_form_factor_intensity): fit by a monodisperse sphere model
    poly_sphere (poly_sphere_form_factor_intensity): fit by a polydisperse sphere model
    Returns
    -------
    fit results:
    radius
    sigma
    an example:
    result = fit_form_factor2( q, iq, guess_values, res_pargs=None, function='poly_sphere' )
'''
if res_pargs is not None:
uid = res_pargs['uid']
path = res_pargs['path']
else:
if 'uid' in kwargs.keys():
uid = kwargs['uid']
else:
uid = 'uid'
if 'path' in kwargs.keys():
path = kwargs['path']
else:
path = ''
if function=='poly_sphere':
mod = Model(poly_sphere_form_factor_intensity)#_q2 )
elif function=='mono_sphere':
mod = Model( mono_sphere_form_factor_intensity )
else:
print ("The %s is not supported.The supported functions include poly_sphere and mono_sphere"%function)
if fit_range is not None:
x1,x2= fit_range
q1=find_index( q,x1,tolerance= None)
q2=find_index( q,x2,tolerance= None)
else:
q1=0
q2=len(q)
q_=q[q1:q2]
iq_ = iq[q1:q2]
_r= guess_values[ 'radius']
_sigma = guess_values['sigma']
_delta_rho= guess_values['delta_rho']
#_scale = guess_values['scale']
#_baseline = guess_values['baseline']
mod.set_param_hint( 'radius', min= _r/10, max=_r*10 )
mod.set_param_hint( 'sigma', min= _sigma/10, max=_sigma*10 )
#mod.set_param_hint( 'scale', min= _scale/1E3, max= _scale*1E3 )
#mod.set_param_hint( 'baseline', min= 0 )
#mod.set_param_hint( 'delta_rho', min= 0 )
mod.set_param_hint( 'delta_rho', min= _delta_rho/1E6, max= _delta_rho*1E6 )
pars = mod.make_params( radius= _r, sigma=_sigma,delta_rho=_delta_rho)# scale= _scale, baseline =_baseline )
if fit_variables is not None:
for var in list( fit_variables.keys()):
pars[var].vary = fit_variables[var]
#pars['delta_rho'].vary =False
fig = plt.figure(figsize=(8, 6))
title_qr = 'form_factor_fit'
plt.title('uid= %s:--->'%uid + title_qr,fontsize=20, y =1.02)
fit_power = 0#2
result = mod.fit( iq_* q_**fit_power, pars, x = q_ )#,fit_func= fit_func )
if function=='poly_sphere':
sigma = result.best_values['sigma']
elif function=='mono_sphere':
sigma=0
r = result.best_values['radius']
#scale = result.best_values['scale']
#baseline = result.best_values['baseline']
delta_rho= result.best_values['delta_rho']
#report_fit( result )
ax = fig.add_subplot(1,1,1 )
ax.semilogy( q, iq, 'ro', label='Form Factor')
ax.semilogy( q_, result.best_fit/q_**fit_power, '-b', lw=3, label='Fit')
txts = r'radius' + r' = %.2f '%( r/10.) + r'$ nm$'
ax.text(x =0.02, y=.35, s=txts, fontsize=14, transform=ax.transAxes)
txts = r'sigma' + r' = %.3f'%( sigma)
#txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$'
ax.text(x =0.02, y=.25, s=txts, fontsize=14, transform=ax.transAxes)
#txts = r'delta_rho' + r' = %.3e'%( delta_rho)
#txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$'
#ax.text(x =0.02, y=.35, s=txts, fontsize=14, transform=ax.transAxes)
ax.legend( loc = 'best' )
if 'ylim' in kwargs:
ax.set_ylim( kwargs['ylim'])
elif 'vlim' in kwargs:
vmin, vmax =kwargs['vlim']
ax.set_ylim([min(y)*vmin, max(y[1:])*vmax ])
else:
pass
if 'xlim' in kwargs:
ax.set_xlim( kwargs['xlim'])
fp = path + '%s_form_factor_fit'%(uid ) + '.png'
fig.savefig( fp, dpi=fig.dpi)
fig.tight_layout()
#plt.show()
result = dict( radius =r, sigma = sigma, delta_rho = delta_rho )
return result
def show_saxs_qmap( img, pargs, width=200,vmin=.1, vmax=300, logs=True,image_name='',
show_colorbar=True, file_name='', show_time = False,
save=False, show_pixel=False, aspect= 1,save_format='png', cmap='viridis',):
'''
    Show a SAXS q-map for the given image and setup parameters
Parameter:
image: the frame
setup pargs, a dictionary, including
dpix #in mm, eiger 4m is 0.075 mm
lambda_ # wavelegth of the X-rays in Angstroms
Ldet # detector to sample distance (mm)
path where to save data
center: beam center in pixel, center[0] (x), should be image-y, and should be python-x
width: the showed area centered at center
Return:
None
'''
Ldet = pargs['Ldet']
dpix = pargs['dpix']
lambda_ = pargs['lambda_']
center = pargs['center']
cx,cy = center
path= pargs['path']
lx,ly = img.shape
#center = [ center[1], center[0] ] #due to python conventions
w= width
img_ = np.zeros( [w,w] )
minW, maxW = min( center[0]-w, center[1]-w ), max( center[0]-w, center[1]-w )
if w < minW:
        img_ = img[cx - w//2:cx + w//2, cy - w//2:cy + w//2]  # square crop of width w centred on the beam centre
#elif w > maxW:
# img_[ cx-w//2:cx+w//2, cy+w//2:cy+w//2 ] =
ROI = [ max(0, center[0]-w), min( center[0]+w, lx), max(0, center[1]-w), min( ly, center[1]+w ) ]
#print( ROI )
ax = plt.subplots()
if not show_pixel:
#print( 'here' )
two_theta = utils.radius_to_twotheta(Ldet ,np.array( [ ( ROI[0] - cx ) * dpix,( ROI[1] - cx ) * dpix,
( ROI[2] - cy ) * dpix,( ROI[3] - cy ) * dpix,
] ))
qext = utils.twotheta_to_q(two_theta, lambda_)
#print( two_theta, qext )
show_img( 1e-15+ img[ ROI[0]:ROI[1], ROI[2]:ROI[3] ], ax=ax,
xlabel=r"$q_x$" + '('+r'$\AA^{-1}$'+')',
ylabel= r"$q_y$" + '('+r'$\AA^{-1}$'+')', extent=[qext[3],qext[2],qext[0],qext[1]],
vmin=vmin, vmax=vmax, logs= logs, image_name= image_name, file_name= file_name,
show_time = show_time,
save_format=save_format,cmap=cmap, show_colorbar=show_colorbar,
save= save, path=path,aspect= aspect)
else:
#qext = w
show_img( 1e-15+ img[ ROI[0]:ROI[1], ROI[2]:ROI[3] ], ax=ax,
xlabel= 'pixel', ylabel= 'pixel', extent=[ROI[0],ROI[1],ROI[2],ROI[3]],
vmin=vmin, vmax=vmax, logs= logs, image_name= image_name, save_format=save_format,cmap=cmap,
show_colorbar=show_colorbar, file_name= file_name, show_time = show_time,
save= save, path=path,aspect= aspect)
return ax
########################
##Fit sphere by scipy.leastsq fit
def fit_sphere_form_factor_func(parameters, ydata, xdata, yerror=None, nonvariables=None):
'''##Develop by YG at July 28, 2017 @CHX
This function is for fitting form factor of polyderse spherical particles by using scipy.leastsq fit
radius, sigma, delta_rho, background = parameters
'''
radius, sigma, delta_rho, background = parameters
fit = poly_sphere_form_factor_intensity( xdata, radius=radius, sigma=sigma,
delta_rho=delta_rho, background=background,
num_points=10, spread=3, fit_func='G' )
error = np.abs(ydata - fit)
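    # scipy.optimize.leastsq squares the returned residuals, so returning sqrt(|ydata - fit|)
    # makes the minimized cost the sum of absolute deviations rather than the usual sum of squares.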
return np.sqrt( error )
def fit_sphere_form_factor_by_leastsq( p0, q, pq, fit_range=None, ):
'''##Develop by YG at July 28, 2017 @CHX
Fitting form factor of polyderse spherical particles by using scipy.leastsq fit
Input:
radius, sigma, delta_rho, background = p0
Return
fit res, res[0] is the fitting parameters
'''
    if fit_range is not None:
        x1, x2 = fit_range
        q1, q2 = find_index(q, x1), find_index(q, x2)
    else:
        q1, q2 = 0, len(q)  # fall back to fitting the full q range
res = leastsq(fit_sphere_form_factor_func, [ p0 ], args=(pq[q1:q2], q[q1:q2], ),
ftol=1.49012e-38, xtol=1.49012e-38, factor=100,
full_output=1)
return res
def plot_fit_sphere_form_factor( q, pq, res, p0=None,xlim=None, ylim=None ):
'''##Develop by YG at July 28, 2017 @CHX'''
if p0 is not None:
radius, sigma, delta_rho, background = p0
fit_init = poly_sphere_form_factor_intensity( q, radius=radius, sigma=sigma,
delta_rho=delta_rho, background=background,
)
radius, sigma, delta_rho, background = res[0]
fit = poly_sphere_form_factor_intensity( q, radius=radius, sigma=sigma,
delta_rho=delta_rho, background=background,
)
fig, ax = plt.subplots()
if p0 is not None:
plot1D(x=q, y= fit_init, c='b',m='',ls='-', lw=3, ax=ax, logy=True, legend='Init_Fitting')
plot1D(x=q, y= fit, c='r',m='',ls='-', lw=3, ax=ax, logy=True, legend='Fitting')
plot1D(x=q, y = pq, c='k', m='X',ax=ax, markersize=3, ls='',legend='data',xlim=xlim,
ylim=ylim, logx=True, xlabel='Q (A-1)', ylabel='P(Q)')
txts = r'radius' + r' = %.2f '%( res[0][0]/10.) + r'$ nm$'
ax.text(x =0.02, y=.25, s=txts, fontsize=14, transform=ax.transAxes)
txts = r'sigma' + r' = %.3f'%( res[0][1])
#txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$'
ax.text(x =0.02, y=.15, s=txts, fontsize=14, transform=ax.transAxes)
def exm_plot():
    '''Example plot comparing a measured form factor (q, iq) with a fitted curve (ff); expects q, iq and ff to be defined in the calling scope.'''
    fig, ax = plt.subplots()
ax.semilogy( q, iq, 'ro',label='data')
ax.semilogy( q, ff, '-b',label='fit')
ax.set_xlim( [0.0001, .01] )
ax.set_ylim( [1E-2,1E4] )
ax.legend(loc='best')
#plot1D( iq, q, logy=True, xlim=[0.0001, .01], ylim=[1E-3,1E4], ax=ax, legend='data')
#plot1D( ff, q, logy=True, xlim=[0.0001, .01], ax=ax, legend='cal')
#%run /XF11ID/analysis/Analysis_Pipelines/Develop/chxanalys/chxanalys/XPCS_SAXS.py
#%run /XF11ID/analysis/Analysis_Pipelines/Develop/chxanalys/chxanalys/chx_generic_functions.py
#%run /XF11ID/analysis/Analysis_Pipelines/Develop/chxanalys/chxanalys/SAXS.py
| bsd-3-clause |
behzadnouri/numpy | numpy/doc/creation.py | 52 | 5507 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to NumPy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic NumPy Array Creation
==============================
NumPy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
For example: ::
  >>> np.zeros((2, 3))
  array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
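For example (an added illustration; whitespace in the printed output may differ
across numpy versions): ::
  >>> np.ones((2, 3))
  array([[ 1., 1., 1.], [ 1., 1., 1.]])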
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
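As a short added illustration (output flattened onto one line): ::
  >>> i, j = np.indices((3, 3))
  >>> i + j
  array([[0, 1, 2], [1, 2, 3], [2, 3, 4]])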
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
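For instance, numpy itself can also read simple delimited text files (an added
sketch; 'data.csv' is a placeholder filename): ::
  >>> arr = np.loadtxt('data.csv', delimiter=',')
  >>> rec = np.genfromtxt('data.csv', delimiter=',', names=True)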
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
read the data, one can wrap that library with a variety of techniques though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
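A minimal sketch of the fromfile()/tofile() round trip mentioned above (the
filename is illustrative; the dtype must be supplied again when reading because
no metadata is stored in the raw file): ::
  >>> a = np.arange(10, dtype=np.float64)
  >>> a.tofile('data.bin')
  >>> np.fromfile('data.bin', dtype=np.float64)
  array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])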
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
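For example (added illustration; random output differs from run to run): ::
  >>> np.random.rand(2, 3) # uniform samples in [0, 1)
  >>> np.diag([1, 2, 3]) # 3x3 matrix with the given diagonal
  array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])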
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
giorgiop/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
    def _str_signature(self):
        # Signature rendering is disabled here; the early return makes the
        # branch below intentionally unreachable.
        return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
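# Hypothetical usage sketch (not part of the original module): render a Python
# object's numpydoc-style docstring as Sphinx-flavoured reST text.
def _example_render_docstring(obj):
    """Illustrative helper: return the Sphinx reST rendering of obj's docstring."""
    doc = get_doc_object(obj, config={'use_plots': False})
    return str(doc)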
| bsd-3-clause |
deepakantony/sms-tools | lectures/05-Sinusoidal-model/plots-code/peaks-on-spectrogram.py | 3 | 1313 | import numpy as np
import matplotlib.pyplot as plt
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/speech-male.wav'))
start = 1.25
end = 1.79
x1 = x[int(start*fs):int(end*fs)]
w = np.hamming(801)
N = 2048
H = 200
t = -70
minSineDur = 0
maxnSines = 150
freqDevOffset = 10
freqDevSlope = 0.001
mX, pX = STFT.stftAnal(x1, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x1, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
plt.figure(1, figsize=(9.5, 7))
maxplotfreq = 800.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, 'x', color='k', markeredgewidth=1.5)
plt.autoscale(tight=True)
plt.title('mX + spectral peaks (speech-male.wav)')
plt.tight_layout()
plt.savefig('peaks-on-spectrogram.png')
plt.show() | agpl-3.0 |
marshallmcdonnell/journals | journals/databases/icat/sns/interface.py | 1 | 12783 | #!/usr/bin/env python
'''
Original author: Ricardo Ferraz Leal (ORNL)
Current version by: Marshall McDonnell
'''
from __future__ import print_function
import json
import decimal
import pandas
from journals.utilities import parse_datetime
from journals.databases.icat.sns.communicate import SnsICat
class SnsICatInterface(object):
def __init__(self):
self.icat = SnsICat()
self.key_list = ['ipts', 'duration', 'startTime', 'totalCounts', 'protonCharge', 'title']
# Utils
#------
@staticmethod
def _hyphen_range(s):
""" Takes a range in form of "a-b" and generate a list of numbers between a and b inclusive.
Also accepts comma separated ranges like "a-b,c-d,f" will build a list which will include
Numbers from a to b, a to d and f"""
s = "".join(s.split()) # removes white space
r = set()
for x in s.split(','):
t = x.split('-')
if len(t) not in [1, 2]:
logger.error("hash_range is given its arguement as " + s + " which seems not correctly formated.")
r.add(int(t[0])) if len(t) == 1 else r.update(set(range(int(t[0]), int(t[1]) + 1)))
l = list(r)
l.sort()
l_in_str = ','.join(str(x) for x in l)
return l_in_str
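    # Illustrative example (hypothetical input, not from the original source):
    #   SnsICatInterface._hyphen_range("1-3,7,10-11") returns "1,2,3,7,10,11"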
def _substitute_keys_in_dictionary(self,obj,old_key,new_key):
if isinstance(obj, dict):
if old_key in obj:
obj[new_key]=obj.pop(old_key)
return {k: self._substitute_keys_in_dictionary(v,old_key,new_key) for k, v in obj.items()}
elif isinstance(obj, list):
return [self._substitute_keys_in_dictionary(elem,old_key,new_key) for elem in obj]
def _convert_to_datetime(self,obj,key):
if isinstance(obj, dict):
if key in obj:
obj[key] = parse_datetime(obj[key])
return {k: self._convert_to_datetime(v,key) for k, v in obj.items()}
elif isinstance(obj, list):
return [self._convert_to_datetime(elem,key) for elem in obj]
# Functions
#----------
def get_instruments(self):
json_data = self.icat.get_instruments()
if json_data is not None and 'instrument' in json_data:
return json_data['instrument']
else:
raise Exception("ICAT did not return the expected result....")
def get_experiments(self,instrument):
json_data = self.icat.get_experiments(instrument)
return json_data
def get_experiments_meta(self, instrument):
json_data = self.icat.get_experiments_meta(instrument)
if json_data is not None and 'proposal' in json_data:
json_data = json_data['proposal']
else:
raise Exception("ICAT did not return the expected result....")
self._substitute_keys_in_dictionary(json_data,'@id','id')
self._convert_to_datetime(json_data,'createTime')
return json_data
def get_experiments_id_and_title(self,instrument):
json_data = self.get_experiments_meta(instrument)
json_data = { (int(entry['id'].split('-')[1]), entry['title']) for entry in json_data }
return json_data
def get_experiments_id_and_date(self,instrument):
json_data = self.get_experiments_meta(instrument)
json_data = { (int(entry['id'].split('-')[1]), entry['createTime']) for entry in json_data }
return json_data
def get_runs_all(self,instrument,experiment):
json_data = self.icat.get_runs_all(instrument,experiment)
self._substitute_keys_in_dictionary(json_data,'@id','id')
self._convert_to_datetime(json_data,'createTime')
self._convert_to_datetime(json_data,'startTime')
self._convert_to_datetime(json_data,'endTime')
return json_data
def get_runs(self,instrument,experiment):
raw_ranges = self.icat.get_run_ranges(instrument,experiment)
if raw_ranges is not None and 'runRange' in raw_ranges:
ranges = self._hyphen_range(raw_ranges["runRange"])
else:
raise Exception("ICAT did not return the expected result....")
return json.loads( "[" + ranges + "]" )
def get_runs_meta(self,instrument,experiment):
raw_ranges = self.icat.get_run_ranges_meta(instrument,experiment)
        # TODO - Need to change to handle IPTS that return multiple proposals as a list of dictionaries
if type(raw_ranges['proposal']) == list:
#ranges = ','.join([ self._hyphen_range(item['runRange']) for item in raw_ranges['proposal'] ])
raw_ranges['proposal'] = raw_ranges['proposal'][0]
ranges = self._hyphen_range(raw_ranges['proposal']['runRange'])
raw_ranges['proposal']['runRange'] = ranges
self._substitute_keys_in_dictionary(raw_ranges,'@id','id')
self._convert_to_datetime(raw_ranges,'createTime')
return raw_ranges
def get_run_number_and_title(self,instrument,experiment):
json_data = self.icat.get_runs_all(instrument,experiment)
if json_data is not None and 'proposal' in json_data:
try:
json_data = json_data['proposal']['runs']['run']
except:
raise Exception("ICAT did not return the expected result....")
else:
raise Exception("ICAT did not return the expected result....")
self._substitute_keys_in_dictionary(json_data,'@id','id')
data_list = list()
for entry in json_data:
title = None
if 'title' in entry:
title = entry['title']
data_list.append([entry['id'],title])
json_data_subset = {"data" : data_list}
return json_data_subset
def get_user_experiments(self,uid):
json_data = self.icat.get_user_experiments(uid)
if json_data is not None and 'proposals' in json_data:
return json_data['proposals']
else:
raise Exception("ICAT did not return the expected result....")
# Unit Functions
#---------------
def _get_list_of_all_ipts(self):
uri = self._ipts_uri
json_data = self._uri2xml2json(uri)
for x in json_data['proposals']['proposal']:
if isinstance(x['$'], str):
if x['$'].startswith('IPTS'):
self._ipts_list.append(int(x['$'].split('-')[1].split('.')[0]))
def _get_runs_from_ipts(self,data):
return [ element.get('id') for element in data.iter() if element.tag == 'run' ]
def _get_los_for_run(self,run,json_data):
json_metadata = json_data['metadata']
try:
ipts_pulled = json_metadata['proposal']['$'].split('-')[1]
except:
ipts_pulled = None
los_data = dict()
uid = run
meta_dict = self._get_meta_for_run(json_metadata)
meta_dict['ipts'] = ipts_pulled
los_data[uid] = meta_dict
self._update_master_los(los_data)
'''
NOTE: Below, the check for list is specific to IPTSs w/ proposal lists. These are:
Index IPTS
----- ----
88 8814
119 9818
'''
def _get_meta_for_ipts(self,runs,proposal_json):
if type(proposal_json) == list:
ipts_pulled = int(proposal_json[0]['@id'].split('-')[1])
runs_data = process_numbers(proposal_json[0]['runRange']['$'])
            for i, proposal in enumerate(proposal_json[1:]):
                runs_data += process_numbers(proposal_json[i+1]['runRange']['$'])
startTime = [(':'.join( proposal_json[0]['createTime']['$'].split(':')[0:3])).split('.')[0]]
for i, proposal in enumerate(proposal_json[1:]):
startTime += [(':'.join( proposal_json[i+1]['createTime']['$'].split(':')[0:3])).split('.')[0]]
else:
ipts_pulled = int(proposal_json['@id'].split('-')[1])
runs_data = process_numbers(proposal_json['runRange']['$'])
startTime = [(':'.join( proposal_json['createTime']['$'].split(':')[0:3])).split('.')[0]]
meta_ipts_data = dict()
meta_ipts_data[ipts_pulled] = {'runs' : runs_data,
'createtime' : startTime}
self._update_master_meta_ipts_data(meta_ipts_data)
def _update_master_meta_ipts_data(self,meta_ipts_data):
self._meta_ipts_data.update(meta_ipts_data)
def _get_los_for_ipts(self,runs,proposal_json):
if type(proposal_json) == list:
ipts_pulled = int(proposal_json[0]['@id'].split('-')[1])
runs_data = proposal_json[0]['runs']['run']
for i, proposal in enumerate(proposal_json[1:]):
runs_data += proposal_json[i+1]['runs']['run']
else:
ipts_pulled = int(proposal_json['@id'].split('-')[1])
runs_data = proposal_json['runs']['run']
los_data = dict()
if len(runs) == 1:
uid = proposal_json['runs']['run']['@id']
x = proposal_json['runs']['run']
meta_dict = self._get_meta_for_run(x)
meta_dict['ipts'] = ipts_pulled
los_data[uid] = meta_dict
else:
for x in runs_data:
uid = x['@id']
meta_dict = self._get_meta_for_run(x)
meta_dict['ipts'] = ipts_pulled
los_data[uid] = meta_dict
self._update_master_los(los_data)
def _update_master_los(self,los_data):
self._los_data.update(los_data)
def _get_meta_for_run(self,metadata):
meta = dict.fromkeys(self.key_list)
for key in self.key_list:
if key in metadata:
if key == 'duration':
meta[key] = str(int(float(metadata[key]['$'])/60.))+'min'
elif key == 'startTime':
meta[key] = (':'.join( metadata[key]['$'].split(':')[0:3])).split('.')[0]
elif key == 'totalCounts':
meta[key] = '{:.2E}'.format(decimal.Decimal(metadata[key]['$']))
elif key == 'protonCharge':
meta[key] = float("{0:.2f}".format(metadata[key]['$'] / 1e12) )
else:
meta[key] = metadata[key]['$']
return meta
# Main Functions
#------------------
def initializeMetaIptsData(self):
ipts_list = self.getListOfIPTS()
self.getIPTSs( ipts_list, data='meta')
def getMetaIptsData(self):
return self._meta_ipts_data
def applyIptsFilter(self,ipts_list):
self.reset_los()
self.getIPTSs(ipts_list)
def getDataFrame(self):
data = self.get_los()
df = pandas.DataFrame.from_dict(data,orient='index')
df = df.reset_index()
df = df.rename(columns={'index': '#Scan', 'duration': 'time', 'protonCharge': 'PC/pC'})
col_order = ['#Scan', 'ipts', 'time', 'startTime', 'totalCounts', 'PC/pC', 'title']
df = df[col_order]
return df
def getListOfIPTS(self):
if not self._ipts_list:
self._get_list_of_all_ipts()
return sorted(self._ipts_list)
def getIPTSs(self,proposals,**kwargs):
for i, ipts in enumerate(proposals):
self.getIPTS(ipts,**kwargs)
def getIPTS(self,ipts,data='all'):
uri = self._ipts_uri + "/IPTS-"+str(ipts)+"/"+data
xml_data = self._uri2xml(uri)
runs = self._get_runs_from_ipts(xml_data)
json_data = self._xml2json(xml_data)
if data == 'all':
try:
self._get_los_for_ipts(runs,json_data['proposals']['proposal'])
except KeyError:
print(ipts, json_data['proposals'])
if data == 'meta':
self._get_meta_for_ipts(runs,json_data['proposals']['proposal'])
def getRun(self,run):
uri = self._run_uri+'/'+ str(run)+"/metaOnly"
json_data = self._uri2xml2json(uri)
self._get_los_for_run(run, json_data)
def reset_los(self):
self._los_data = dict()
def get_los(self):
return self._los_data
def print_runs(self):
if self._runs is None:
self._get_runs()
for run in self._runs:
print(run)
def print_los(self):
if self._los_data is None:
print(self._los_data, "( No data yet in los dictionary. )")
los_data = self._los_data
print("#Scan IPTS time starttime totalCounts PC/C title")
for run in sorted(los_data.keys()):
print(run, end=' ')
for key in self.key_list:
print(los_data[run][key], end=' ')
print()
| mit |
rhyolight/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backend_bases.py | 69 | 69740 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
    handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
"""
from __future__ import division
import os, warnings, time
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
from matplotlib import rcParams
class RendererBase:
"""An abstract base class to handle drawing/rendering operations.
The following methods *must* be implemented in the backend:
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_text`
* :meth:`get_text_width_height_descent`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
def open_group(self, s):
"""
Open a grouping element with label *s*. Is only currently used by
:mod:`~matplotlib.backends.backend_svg`
"""
pass
def close_group(self, s):
"""
Close a grouping element with label *s*
Is only currently used by :mod:`~matplotlib.backends.backend_svg`
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
tpath = trans.transform_path(path)
for vertices, codes in tpath.iter_segments():
if len(vertices):
x,y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans + transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
"""
Draws a collection of paths, selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before
being applied.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
draw_path. Some backends may want to override this in order
to render each set of path data only once, and then reference
that path multiple times with the different offsets, colors,
styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
path, transform = path_id
transform = transforms.Affine2D(transform.get_matrix()).translate(xo, yo)
self.draw_path(gc, path, transform, rgbFace)
def draw_quad_mesh(self, master_transform, cliprect, clippath,
clippath_trans, meshWidth, meshHeight, coordinates,
offsets, offsetTrans, facecolors, antialiased,
showedges):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if showedges:
edgecolors = np.array([[0.0, 0.0, 0.0, 1.0]], np.float_)
linewidths = np.array([1.0], np.float_)
else:
edgecolors = facecolors
linewidths = np.array([0.0], np.float_)
return self.draw_path_collection(
master_transform, cliprect, clippath, clippath_trans,
paths, [], offsets, offsetTrans, facecolors, edgecolors,
linewidths, [], [antialiased], [None])
def _iter_collection_raw_paths(self, master_transform, paths, all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
        it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = all_transforms[i % Ntransforms]
yield path, transform + master_transform
def _iter_collection(self, path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
        a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc = self.new_gc()
gc.set_clip_rectangle(cliprect)
if clippath is not None:
clippath = transforms.TransformedPath(clippath, clippath_trans)
gc.set_clip_path(clippath)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc.set_linewidth(0.0)
xo, yo = 0, 0
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
gc.set_foreground(edgecolors[i % Nedgecolors])
if Nlinewidths:
gc.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc.set_dashes(*linestyles[i % Nlinestyles])
if rgbFace is not None and len(rgbFace)==4:
gc.set_alpha(rgbFace[-1])
rgbFace = rgbFace[:3]
gc.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc, rgbFace
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the image instance into the current axes;
*x*
is the distance in pixels from the left hand side of the canvas.
*y*
the distance from the origin. That is, if origin is
upper, y is the distance from top. If origin is lower, y
is the distance from bottom
*im*
the :class:`matplotlib._image.Image` instance
*bbox*
a :class:`matplotlib.transforms.Bbox` instance for clipping, or
None
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
overwrite this method for renderers that do not necessarily
want to rescale and composite raster images. (like SVG)
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
raise NotImplementedError
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text in display coords
*s*
a :class:`matplotlib.text.Text` instance
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be blotted along with
your text.
"""
raise NotImplementedError
def flipy(self):
"""
        Return True if small y values are at the top for this renderer.
        It is used only for drawing text (:mod:`matplotlib.text`) and images
        (:mod:`matplotlib.image`).
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
raise NotImplementedError
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
doesn't have a dpi, eg, postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
"""
return points
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
pass
def stop_rasterizing(self):
pass
class GraphicsContextBase:
"""
An abstract base class that provides color, line styles, etc...
"""
# a mapping from dash styles to suggested offset, dash pairs
dashd = {
'solid' : (None, None),
'dashed' : (0, (6.0, 6.0)),
'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
'dotted' : (0, (1.0, 3.0)),
}
def __init__(self):
self._alpha = 1.0
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'miter'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0)
self._hatch = None
self._url = None
self._snap = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._hatch = gc._hatch
self._url = gc._url
self._snap = gc._snap
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox` instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
The dash list is a even size list that gives the ink on, ink
off in pixels.
See p107 of to PostScript `BLUEBOOK
<http://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
def get_linestyle(self, style):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
returns a tuple of three floats from 0-1. color can be a
matlab format string, a html hex color string, or a rgb tuple
"""
return self._rgb
def get_url(self):
"""
returns a url if one is set, None otherwise
"""
return self._url
def get_snap(self):
"""
returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
"""
self._alpha = alpha
def set_antialiased(self, b):
"""
True if object should be drawn with antialiased rendering
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b: self._antialiased = 1
else: self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
assert path is None or isinstance(path, transforms.TransformedPath)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
*dash_offset*
is the offset (usually 0).
*dash_list*
specifies the on-off sequence as points. ``(None, None)`` specifies a solid line
"""
self._dashes = dash_offset, dash_list
def set_foreground(self, fg, isRGB=False):
"""
Set the foreground color. fg can be a matlab format string, a
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
The :class:`GraphicsContextBase` converts colors to rgb
internally. If you know the color is rgb already, you can set
        ``isRGB=True`` to avoid the performance hit of the conversion
"""
if isRGB:
self._rgb = fg
else:
self._rgb = colors.colorConverter.to_rgba(fg)
def set_graylevel(self, frac):
"""
Set the foreground color to be a gray level with *frac*
"""
self._rgb = (frac, frac, frac)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = w
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
try:
offset, dashes = self.dashd[style]
except:
raise ValueError('Unrecognized linestyle: %s' % style)
self._linestyle = style
self.set_dashes(offset, dashes)
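    # Illustrative sketch (not in the original source): with the dashd mapping above,
    #   gc = GraphicsContextBase()
    #   gc.set_linestyle('dashed')   # equivalent to gc.set_dashes(0, (6.0, 6.0))
    #   gc.get_dashes()              # -> (0, (6.0, 6.0))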
def set_url(self, url):
"""
Sets the url for links in compatible backends
"""
self._url = url
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
class Event:
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas,guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
pass
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event attributes are defined:
*renderer*
the :class:`RendererBase` instance for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event attributes are defined:
*width*
width of the canvas in pixels
*height*
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class LocationEvent(Event):
"""
A event that has a screen location
The following additional attributes are defined and shown with
their default values
In addition to the :class:`Event` attributes, the following event attributes are defined:
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
    y = None # y position - pixels from bottom of canvas
    inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y,guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas,guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
axes_list = [a for a in self.canvas.figure.get_axes() if a.in_axes(self)]
if len(axes_list) == 0: # None found
self.inaxes = None
self._update_enter_leave()
return
elif (len(axes_list) > 1): # Overlap, get the highest zorder
axCmp = lambda _x,_y: cmp(_x.zorder, _y.zorder)
axes_list.sort(axCmp)
self.inaxes = axes_list[-1] # Use the highest zorder
else: # Just found one hit
self.inaxes = axes_list[0]
try:
xdata, ydata = self.inaxes.transData.inverted().transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes!=self.inaxes:
# process axes enter/leave events
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event', 'button_release_event', 'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used for scroll events)
*key*
        the key pressed: None, chr(range(255)), 'shift', 'win', or 'control'
*step*
number of scroll steps (positive for 'up', negative for 'down')
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
    y = None # y position - pixels from bottom of canvas
    button = None # button pressed None, 1, 2, 3
    inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
class PickEvent(Event):
"""
a pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attrs: all the :class:`Event` attributes plus
*mouseevent*
the :class:`MouseEvent` that generated the pick
*artist*
the :class:`~matplotlib.artist.Artist` picked
other
extra class dependent attrs -- eg a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Example usage::
line, = ax.plot(rand(100), 'o', picker=5) # 5 points tolerance
def on_pick(event):
thisline = event.artist
xdata, ydata = thisline.get_data()
ind = event.ind
print 'on pick line:', zip(xdata[ind], ydata[ind])
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist, guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*key*
        the key pressed: None, chr(range(255)), shift, win, or control
This interface may change slightly when better support for
modifier keys is included.
Example usage::
def on_key(event):
print 'you pressed', event.key, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase:
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event'
]
def __init__(self, figure):
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry(self.events)
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event',self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event',self.pick)
if False:
## highlight the artists that are hit
self.mpl_connect('motion_notify_event',self.onHilite)
## delete the artists that are clicked on
#self.mpl_disconnect(self.button_pick_id)
#self.mpl_connect('button_press_event',self.onRemove)
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
under the cursor. Connect this to the 'mouse_press_event'
using::
canvas.mpl_connect('mouse_press_event',canvas.onRemove)
"""
def sort_artists(artists):
# This depends on stable sort and artists returned
# from get_children in z order.
L = [ (h.zorder, h) for h in artists ]
L.sort()
return [ h for zorder, h in L ]
# Find the top artist under the cursor
under = sort_artists(self.figure.hitlist(ev))
h = None
if under: h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
print "Removing",h
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def onHilite(self, ev):
"""
Mouse event processor which highlights the artists
under the cursor. Connect this to the 'motion_notify_event'
using::
canvas.mpl_connect('motion_notify_event',canvas.onHilite)
"""
if not hasattr(self,'_active'): self._active = dict()
under = self.figure.hitlist(ev)
enter = [a for a in under if a not in self._active]
leave = [a for a in self._active if a not in under]
print "within:"," ".join([str(x) for x in under])
#print "entering:",[str(a) for a in enter]
#print "leaving:",[str(a) for a in leave]
# On leave restore the captured colour
for a in leave:
if hasattr(a,'get_color'):
a.set_color(self._active[a])
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor(self._active[a][0])
a.set_facecolor(self._active[a][1])
del self._active[a]
# On enter, capture the color and repaint the artist
# with the highlight colour. Capturing colour has to
# be done first in case the parent recolouring affects
# the child.
for a in enter:
if hasattr(a,'get_color'):
self._active[a] = a.get_color()
elif hasattr(a,'get_edgecolor'):
self._active[a] = (a.get_edgecolor(),a.get_facecolor())
else: self._active[a] = None
for a in enter:
if hasattr(a,'get_color'):
a.set_color('red')
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor('red')
a.set_facecolor('lightblue')
else: self._active[a] = None
self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""
blit the canvas in bbox (default entire canvas)
"""
pass
def resize(self, w, h):
"""
set the canvas size in pixels
"""
pass
def draw_event(self, renderer):
"""
        This method will call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
        This method will call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
def key_press_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
This method will be called by artists who are picked and will
fire off :class:`PickEvent` callbacks registered listeners
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist, **kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
        This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
        This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
def enter_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when entering
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
'call when GUI is idle'
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def draw(self, *args, **kwargs):
"""
Render the :class:`~matplotlib.figure.Figure`
"""
pass
def draw_idle(self, *args, **kwargs):
"""
        :meth:`draw` only if idle; defaults to draw but backends can override
"""
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
pass
def get_width_height(self):
"""
return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
filetypes = {
'emf': 'Enhanced Metafile',
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'png': 'Portable Network Graphics',
'ps' : 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'
}
# All of these print_* functions do a lazy import because
# a) otherwise we'd have cyclical imports, since all of these
# classes inherit from FigureCanvasBase
# b) so we don't import a bunch of stuff the user may never use
def print_emf(self, *args, **kwargs):
from backends.backend_emf import FigureCanvasEMF # lazy import
emf = self.switch_backends(FigureCanvasEMF)
return emf.print_emf(*args, **kwargs)
def print_eps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_eps(*args, **kwargs)
def print_pdf(self, *args, **kwargs):
from backends.backend_pdf import FigureCanvasPdf # lazy import
pdf = self.switch_backends(FigureCanvasPdf)
return pdf.print_pdf(*args, **kwargs)
def print_png(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(*args, **kwargs)
def print_ps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_ps(*args, **kwargs)
def print_raw(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_raw(*args, **kwargs)
print_bmp = print_rgb = print_raw
def print_svg(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svg(*args, **kwargs)
def print_svgz(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svgz(*args, **kwargs)
def get_supported_filetypes(self):
return self.filetypes
def get_supported_filetypes_grouped(self):
groupings = {}
for ext, name in self.filetypes.items():
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
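# Illustrative sketch (not part of the original source): how a save dialog might
# query the filetype helpers above. Assumes the Agg backend is available; the
# variable names are hypothetical.
#
#     from matplotlib.figure import Figure
#     from matplotlib.backends.backend_agg import FigureCanvasAgg
#
#     canvas = FigureCanvasAgg(Figure())
#     canvas.get_supported_filetypes()          # e.g. {'png': 'Portable Network Graphics', ...}
#     canvas.get_supported_filetypes_grouped()  # e.g. {'Portable Network Graphics': ['png'], ...}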
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Arguments are:
*filename*
can also be a file object on image backends
*orientation*
only currently applies to PostScript printing.
*dpi*
the dots per inch to save the figure in; if None, use savefig.dpi
*facecolor*
the facecolor of the figure
*edgecolor*
the edgecolor of the figure
*orientation*
'landscape' | 'portrait' (not supported on all backends)
*format*
when set, forcibly set the file format to save to
"""
if format is None:
if cbook.is_string_like(filename):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if cbook.is_string_like(filename):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
method_name = 'print_%s' % format
if (format not in self.filetypes or
not hasattr(self, method_name)):
formats = self.filetypes.keys()
formats.sort()
raise ValueError(
'Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
if dpi is None:
dpi = rcParams['savefig.dpi']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
try:
result = getattr(self, method_name)(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
**kwargs)
finally:
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
#self.figure.canvas.draw() ## seems superfluous
return result
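# Illustrative usage sketch (not part of the original source); assumes the Agg
# backend and a writable 'output.png' path, both of which are hypothetical here:
#
#     from matplotlib.figure import Figure
#     from matplotlib.backends.backend_agg import FigureCanvasAgg
#
#     fig = Figure()
#     canvas = FigureCanvasAgg(fig)
#     fig.gca().plot([0, 1], [0, 1])
#     canvas.print_figure('output.png', dpi=150)   # the 'png' extension selects print_png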
def get_default_filetype(self):
raise NotImplementedError
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def switch_backends(self, FigureCanvasClass):
"""
instantiate an instance of FigureCanvasClass
This is used for backend switching, eg, to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
not done, so any changes to one of the instances (eg, setting
figure size or line props), will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
return newCanvas
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
set to the :class:`~matplotlib.axes.Axes` instance over which the
event occurred; additionally, the variables ``event.xdata`` and
``event.ydata`` will be defined. This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
:meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = canvas.mpl_connect('button_press_event', on_press)
"""
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
disconnect callback id cid
Example usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
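# Illustrative connect/disconnect round trip (hypothetical callback and figure;
# not part of the original source):
#
#     def on_press(event):
#         if event.inaxes is not None:
#             print('you pressed', event.button, event.xdata, event.ydata)
#
#     cid = fig.canvas.mpl_connect('button_press_event', on_press)
#     # ... later, stop listening:
#     fig.canvas.mpl_disconnect(cid)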
def flush_events(self):
"""
Flush the GUI events for the figure. Implemented only for
backends with GUIs.
"""
raise NotImplementedError
def start_event_loop(self,timeout):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def stop_event_loop(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def start_event_loop_default(self,timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
such, it emits a DeprecationWarning.
Call signature::
start_event_loop_default(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
str = "Using default event loop until function specific"
str += " to this GUI is implemented"
warnings.warn(str,DeprecationWarning)
if timeout <= 0: timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter*timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
def stop_event_loop_default(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
self._looping = False
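# Illustrative sketch of the blocking-loop protocol described above (hypothetical
# handler, GUI backend assumed; not part of the original source): a callback stops
# the loop that start_event_loop() blocks in, which is how ginput-style functions
# wait for input.
#
#     def _on_click(event):
#         event.canvas.stop_event_loop()
#
#     cid = canvas.mpl_connect('button_press_event', _on_click)
#     canvas.start_event_loop(timeout=30)   # returns on click or after 30 seconds
#     canvas.mpl_disconnect(cid)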
class FigureManagerBase:
"""
Helper class for pyplot mode; wraps everything up into a neat bundle
Public attributes:
*canvas*
A :class:`FigureCanvasBase` instance
*num*
The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
self.canvas.mpl_connect('key_press_event', self.key_press)
def destroy(self):
pass
def full_screen_toggle (self):
pass
def resize(self, w, h):
'For gui backends: resize window in pixels'
pass
def key_press(self, event):
# these bindings happen whether you are over an axes or not
#if event.key == 'q':
# self.destroy() # how cruel to have to destroy oneself!
# return
if event.key == 'f':
self.full_screen_toggle()
# *h*ome or *r*eset mnemonic
elif event.key == 'h' or event.key == 'r' or event.key == "home":
self.canvas.toolbar.home()
# c and v to enable left handed quick navigation
elif event.key == 'left' or event.key == 'c' or event.key == 'backspace':
self.canvas.toolbar.back()
elif event.key == 'right' or event.key == 'v':
self.canvas.toolbar.forward()
# *p*an mnemonic
elif event.key == 'p':
self.canvas.toolbar.pan()
# z*o*om mnemonic
elif event.key == 'o':
self.canvas.toolbar.zoom()
elif event.key == 's':
self.canvas.toolbar.save_figure(self.canvas.toolbar)
if event.inaxes is None:
return
# the mouse has to be over an axes to trigger these
if event.key == 'g':
event.inaxes.grid()
self.canvas.draw()
elif event.key == 'l':
ax = event.inaxes
scale = ax.get_yscale()
if scale=='log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale=='linear':
ax.set_yscale('log')
ax.figure.canvas.draw()
elif event.key is not None and (event.key.isdigit() and event.key!='0') or event.key=='a':
# 'a' enables all axes
if event.key!='a':
n=int(event.key)-1
for i, a in enumerate(self.canvas.figure.get_axes()):
if event.x is not None and event.y is not None and a.in_axes(event):
if event.key=='a':
a.set_navigate(True)
else:
a.set_navigate(i==n)
def show_popup(self, msg):
"""
Display message in a popup -- GUI only
"""
pass
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
pass
# cursors
class Cursors: #namespace
HAND, POINTER, SELECT_REGION, MOVE = range(4)
cursors = Cursors()
class NavigationToolbar2:
"""
Base class for the navigation cursor, version 2
backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`dynamic_update` (optional)
dynamically update the window while navigating
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
"""
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a dict from axes index to a list of view limits
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
self._button_pressed = None # determined by the button pressed at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
'display a message on toolbar or in status bar'
pass
def back(self, *args):
'move back up the view lim stack'
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
'draw a rectangle rubberband to indicate zoom limits'
pass
def forward(self, *args):
'move forward in the view lim stack'
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
'restore the original view'
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def mouse_move(self, event):
#print 'mouse_move', event.button
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active=='ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = self._xypress[0]
self.draw_rubberband(event, x, y, lastx, lasty)
elif (self._active=='PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
if event.inaxes and event.inaxes.get_navigate():
try: s = event.inaxes.format_coord(event.xdata, event.ydata)
except ValueError: pass
except OverflowError: pass
else:
if len(self.mode):
self.set_message('%s : %s' % (self.mode, s))
else:
self.set_message(s)
else: self.set_message(self.mode)
def pan(self,*args):
'Activate the pan/zoom tool. pan with left button, zoom with right'
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom mode'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
'this will be called whenever a mouse button is pressed'
pass
def press_pan(self, event):
'the mouse-button-press callback in pan/zoom mode'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if x is not None and y is not None and a.in_axes(event) and a.get_navigate():
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.drag_pan)
self.press(event)
def press_zoom(self, event):
'the mouse-button-press callback in zoom-to-rect mode'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if x is not None and y is not None and a.in_axes(event) \
and a.get_navigate() and a.can_zoom():
self._xypress.append(( x, y, a, i, a.viewLim.frozen(), a.transData.frozen()))
self.press(event)
def push_current(self):
'push the current view limits and position onto the stack'
lims = []; pos = []
for a in self.canvas.figure.get_axes():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lims.append( (xmin, xmax, ymin, ymax) )
# Store both the original and modified positions
pos.append( (
a.get_position(True).frozen(),
a.get_position().frozen() ) )
self._views.push(lims)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
'this will be called whenever a mouse button is released'
pass
def release_pan(self, event):
'the release mouse button callback in pan/zoom mode'
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress: return
self._xypress = []
self._button_pressed=None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
'the drag callback in pan/zoom mode'
for a, ind in self._xypress:
# safer to use the button recorded at the press than the current button:
# multiple buttons can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def release_zoom(self, event):
'the release mouse button callback in zoom to rect mode'
if not self._xypress: return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x-lastx)<5 or abs(y-lasty)<5:
self._xypress = None
self.release(event)
self.draw()
return
x0, y0, x1, y1 = lim.extents
# zoom to rect
inverse = a.transData.inverted()
lastx, lasty = inverse.transform_point( (lastx, lasty) )
x, y = inverse.transform_point( (x, y) )
Xmin,Xmax=a.get_xlim()
Ymin,Ymax=a.get_ylim()
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a,la): twinx=True
if a.get_shared_y_axes().joined(a,la): twiny=True
last_a.append(a)
if twinx:
x0, x1 = Xmin, Xmax
else:
if Xmin < Xmax:
if x<lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 < Xmin: x0=Xmin
if x1 > Xmax: x1=Xmax
else:
if x>lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 > Xmin: x0=Xmin
if x1 < Xmax: x1=Xmax
if twiny:
y0, y1 = Ymin, Ymax
else:
if Ymin < Ymax:
if y<lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 < Ymin: y0=Ymin
if y1 > Ymax: y1=Ymax
else:
if y>lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 > Ymin: y0=Ymin
if y1 < Ymax: y1=Ymax
if self._button_pressed == 1:
a.set_xlim((x0, x1))
a.set_ylim((y0, y1))
elif self._button_pressed == 3:
if a.get_xscale()=='log':
alpha=np.log(Xmax/Xmin)/np.log(x1/x0)
rx1=pow(Xmin/x0,alpha)*Xmin
rx2=pow(Xmax/x0,alpha)*Xmin
else:
alpha=(Xmax-Xmin)/(x1-x0)
rx1=alpha*(Xmin-x0)+Xmin
rx2=alpha*(Xmax-x0)+Xmin
if a.get_yscale()=='log':
alpha=np.log(Ymax/Ymin)/np.log(y1/y0)
ry1=pow(Ymin/y0,alpha)*Ymin
ry2=pow(Ymax/y0,alpha)*Ymin
else:
alpha=(Ymax-Ymin)/(y1-y0)
ry1=alpha*(Ymin-y0)+Ymin
ry2=alpha*(Ymax-y0)+Ymin
a.set_xlim((rx1, rx2))
a.set_ylim((ry1, ry2))
self.draw()
self._xypress = None
self._button_pressed = None
self.push_current()
self.release(event)
def draw(self):
'redraw the canvases, update the locators'
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw()
def _update_view(self):
'''update the viewlim and position from the view and
position stack for each axes
'''
lims = self._views()
if lims is None: return
pos = self._positions()
if pos is None: return
for i, a in enumerate(self.canvas.figure.get_axes()):
xmin, xmax, ymin, ymax = lims[i]
a.set_xlim((xmin, xmax))
a.set_ylim((ymin, ymax))
# Restore both the original and modified positions
a.set_position( pos[i][0], 'original' )
a.set_position( pos[i][1], 'active' )
self.draw()
def save_figure(self, *args):
'save the current figure'
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
enums values
"""
pass
def update(self):
'reset the axes stack'
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
'activate zoom to rect mode'
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress=self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease=self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event', self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event', self.release_zoom)
self.mode = 'Zoom to rect mode'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
'enable or disable back/forward button'
pass
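# Illustrative sketch (not part of the original source) of the subclass contract
# described in the NavigationToolbar2 docstring: a hypothetical minimal backend
# only has to build its widget and provide save_figure; the base class drives the
# navigation logic.
#
#     class NavigationToolbar2Minimal(NavigationToolbar2):
#         def _init_toolbar(self):
#             self._buttons = {}            # a real GUI backend would build widgets here
#         def set_message(self, s):
#             print(s)                      # e.g. route to a status bar
#         def save_figure(self, *args):
#             self.canvas.print_figure('figure.png')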
| agpl-3.0 |
scipy/scipy | scipy/special/_precompute/struve_convergence.py | 12 | 3456 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
import numpy as np
import matplotlib.pyplot as plt # type: ignore[import]
import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
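# Quick illustrative check of err_metric on hypothetical values (not part of the
# original script): a tiny absolute difference yields a tiny relative error, while
# a genuine disagreement shows up as a large one.
_err_demo = err_metric(np.array([1.0, 3.0]), np.array([1.0 + 1e-12, 2.0]))
# _err_demo is approximately [1e-12, 0.5]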
def do_plot(is_h=True):
from scipy.special._ufuncs import (_struve_power_series,
_struve_asymp_large_z,
_struve_bessel_series)
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
roxyboy/bokeh | examples/plotting/file/boxplot.py | 43 | 2269 | import numpy as np
import pandas as pd
from bokeh.plotting import figure, show, output_file
# Generate some synthetic time series for six different categories
cats = list("abcdef")
yy = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
yy[g == l] += i // 2
df = pd.DataFrame(dict(score=yy, group=g))
# Find the quartiles and IQR for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
upper = q3 + 1.5*iqr
lower = q1 - 1.5*iqr
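# Aside (illustrative only, hypothetical numbers): the 1.5*IQR fence rule on a tiny
# series; here q1=2.0 and q3=4.0, so anything above 4 + 1.5*2 = 7 is an outlier.
_toy = pd.Series([1.0, 2.0, 3.0, 4.0, 100.0])
_toy_q1, _toy_q3 = _toy.quantile(0.25), _toy.quantile(0.75)
_toy_upper = _toy_q3 + 1.5 * (_toy_q3 - _toy_q1)   # 7.0; only 100.0 lies above it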
# find the outliers for each category
def outliers(group):
cat = group.name
return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']
out = groups.apply(outliers).dropna()
# Prepare outlier data for plotting: we need a coordinate for every outlier.
outx = []
outy = []
for cat in cats:
# only add outliers if they exist
if not out.loc[cat].empty:
for value in out[cat]:
outx.append(cat)
outy.append(value)
output_file("boxplot.html")
p = figure(tools="save", background_fill="#EFE8E2", title="", x_range=cats)
# If no outliers, shrink lengths of stems to be no longer than the minimums or maximums
qmin = groups.quantile(q=0.00)
qmax = groups.quantile(q=1.00)
upper.score = [min([x,y]) for (x,y) in zip(list(qmax.iloc[:,0]),upper.score) ]
lower.score = [max([x,y]) for (x,y) in zip(list(qmin.iloc[:,0]),lower.score) ]
# stems
p.segment(cats, upper.score, cats, q3.score, line_width=2, line_color="black")
p.segment(cats, lower.score, cats, q1.score, line_width=2, line_color="black")
# boxes
p.rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,
fill_color="#E08E79", line_width=2, line_color="black")
p.rect(cats, (q2.score+q1.score)/2, 0.7, q2.score-q1.score,
fill_color="#3B8686", line_width=2, line_color="black")
# whiskers (almost-0 height rects simpler than segments)
p.rect(cats, lower.score, 0.2, 0.01, line_color="black")
p.rect(cats, upper.score, 0.2, 0.01, line_color="black")
# outliers
p.circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = "white"
p.grid.grid_line_width = 2
p.xaxis.major_label_text_font_size="12pt"
show(p)
| bsd-3-clause |
xubenben/scikit-learn | sklearn/metrics/tests/test_common.py | 83 | 41144 | from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curves are currently not covered by the invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": r2_score,
}
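# Illustrative lookup (hypothetical values, not an actual test case): every entry
# maps a readable name to a callable that scores a (y_true, y_pred) pair, e.g.
#   REGRESSION_METRICS["mean_absolute_error"]([3.0, 5.0], [2.5, 5.0]) -> 0.25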
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
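# Illustrative distinction (hypothetical values): thresholded metrics consume
# continuous scores rather than hard label predictions, e.g.
#   roc_auc_score([0, 1, 1, 0], [0.4, 0.8, 0.2, 0.3]) -> 0.5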
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Metrics undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_MULTICLASS = [
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
# Those metrics don't support multiclass outputs
"average_precision_score", "weighted_average_precision_score",
"micro_average_precision_score", "macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_average_precision_score",
"roc_auc_score", "micro_roc_auc", "weighted_roc_auc",
"macro_roc_auc", "samples_roc_auc",
"coverage_error",
"brier_score_loss",
"label_ranking_loss",
]
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that have a labels argument as well as a
# decision function argument, e.g. hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
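# Illustrative effect of the option (hypothetical values):
#   accuracy_score([0, 1, 1], [0, 1, 0], normalize=True)  -> 2/3 (fraction correct)
#   accuracy_score([0, 1, 1], [0, 1, 0], normalize=False) -> 2   (count correct)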
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "weighted_f1_score", "micro_f1_score", "macro_f1_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f2_score", "weighted_precision_score",
"weighted_recall_score",
"micro_f0.5_score", "micro_f2_score", "micro_precision_score",
"micro_recall_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"cohen_kappa_score",
"confusion_matrix",
"hamming_loss",
"matthews_corrcoef_score",
"median_absolute_error",
]
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(NOT_SYMMETRIC_METRICS,
THRESHOLDED_METRICS,
METRIC_UNDEFINED_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mixed representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
# Ensure that classification metrics are invariant to string vs. number labels
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
"brier_score_loss"):
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
# TODO: these metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if name in METRIC_UNDEFINED_MULTICLASS or name in THRESHOLDED_METRICS:
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 += [0]*n_classes
y2 += [0]*n_classes
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
def test_raise_value_error_multilabel_sequences():
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclasss_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
# for both random_state 0 and 1, y_true and y_pred have at least one
# unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
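# Concrete numbers for the averaging modes exercised above (hypothetical inputs):
#   precision_score([0, 1, 2, 0], [0, 2, 1, 0], average=None)    -> [1.0, 0.0, 0.0]
#   precision_score([0, 1, 2, 0], [0, 2, 1, 0], average="macro") -> 0.333...
#   precision_score([0, 1, 2, 0], [0, 2, 1, 0], average="micro") -> 0.5 (2 of 4 predictions correct)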
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
# check that unit weights give the same score as no weights
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg="Weighted scores for array and list sample_weight input are "
"not equal (%f != %f) for %s" % (
weighted_score, weighted_score_list, name))
# check that integer weights are equivalent to repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
# Check that an error is raised if sample_weight.shape[0] != y_true.shape[0]
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# binary output
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multilabel indicator
_, ya = make_multilabel_classification(n_features=1, n_classes=20,
random_state=0, n_samples=100,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=20,
random_state=1, n_samples=100,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
            if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
| bsd-3-clause |
mick-d/nipype | nipype/info.py | 1 | 6276 | """ This file defines parameters for nipype that we use to fill
settings in setup.py, the nipype top-level docstring, and for building the
docs. In setup.py in particular, we exec this file, so it cannot import nipype.
"""
from __future__ import print_function, division, unicode_literals, absolute_import
import sys
# nipype version information. An empty version_extra corresponds to a
# full release. '.dev' as a version_extra string means this is a development
# version
# Remove -dev for release
__version__ = '1.0.0-dev'
def get_nipype_gitversion():
"""Nipype version as reported by the last commit in git
Returns
-------
None or str
Version of Nipype according to git.
"""
import os
import subprocess
try:
import nipype
gitpath = os.path.realpath(os.path.join(os.path.dirname(nipype.__file__),
os.path.pardir))
except:
gitpath = os.getcwd()
gitpathgit = os.path.join(gitpath, '.git')
if not os.path.exists(gitpathgit):
return None
ver = None
try:
o, _ = subprocess.Popen('git describe', shell=True, cwd=gitpath,
stdout=subprocess.PIPE).communicate()
except Exception:
pass
else:
ver = o.decode().strip().split('-')[-1]
return ver
if __version__.endswith('-dev'):
gitversion = get_nipype_gitversion()
if gitversion:
__version__ = '{}+{}'.format(__version__, gitversion)
CLASSIFIERS = ['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering']
description = 'Neuroimaging in Python: Pipelines and Interfaces'
# Note: this long_description is actually a copy/paste from the top-level
# README.txt, so that it shows up nicely on PyPI. So please remember to edit
# it only in one place and sync it correctly.
long_description = """========================================================
NIPYPE: Neuroimaging in Python: Pipelines and Interfaces
========================================================
Current neuroimaging software offer users an incredible opportunity to \
analyze data using a variety of different algorithms. However, this has \
resulted in a heterogeneous collection of specialized applications \
without transparent interoperability or a uniform operating interface.
*Nipype*, an open-source, community-developed initiative under the \
umbrella of NiPy_, is a Python project that provides a uniform interface \
to existing neuroimaging software and facilitates interaction between \
these packages within a single workflow. Nipype provides an environment \
that encourages interactive exploration of algorithms from different \
packages (e.g., AFNI, ANTS, BRAINS, BrainSuite, Camino, FreeSurfer, FSL, MNE, \
MRtrix, MNE, Nipy, Slicer, SPM), eases the design of workflows within and \
between packages, and reduces the learning curve necessary to use different \
packages. Nipype is creating a collaborative platform for neuroimaging software \
development in a high-level language and addressing limitations of existing \
pipeline systems.
*Nipype* allows you to:
* easily interact with tools from different software packages
* combine processing steps from different software packages
* develop new workflows faster by reusing common steps from old ones
* process data faster by running it in parallel on many cores/machines
* make your research easily reproducible
* share your processing workflows with the community
"""
# versions
NIBABEL_MIN_VERSION = '2.1.0'
NETWORKX_MIN_VERSION = '1.9'
NUMPY_MIN_VERSION = '1.9.0'
SCIPY_MIN_VERSION = '0.14'
TRAITS_MIN_VERSION = '4.6'
DATEUTIL_MIN_VERSION = '2.2'
PYTEST_MIN_VERSION = '3.0'
FUTURE_MIN_VERSION = '0.16.0'
SIMPLEJSON_MIN_VERSION = '3.8.0'
PROV_VERSION = '1.5.0'
CLICK_MIN_VERSION = '6.6.0'
PYDOT_MIN_VERSION = '1.2.3'
NAME = 'nipype'
MAINTAINER = 'nipype developers'
MAINTAINER_EMAIL = '[email protected]'
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = 'http://nipy.org/nipype'
DOWNLOAD_URL = 'http://github.com/nipy/nipype/archives/master'
LICENSE = 'Apache License, 2.0'
CLASSIFIERS = CLASSIFIERS
AUTHOR = 'nipype developers'
AUTHOR_EMAIL = '[email protected]'
PLATFORMS = 'OS Independent'
MAJOR = __version__.split('.')[0]
MINOR = __version__.split('.')[1]
MICRO = __version__.replace('-', '.').split('.')[2]
ISRELEASE = (len(__version__.replace('-', '.').split('.')) == 3 or
'post' in __version__.replace('-', '.').split('.')[-1])
VERSION = __version__
PROVIDES = ['nipype']
REQUIRES = [
'nibabel>=%s' % NIBABEL_MIN_VERSION,
'networkx>=%s' % NETWORKX_MIN_VERSION,
'numpy>=%s' % NUMPY_MIN_VERSION,
'python-dateutil>=%s' % DATEUTIL_MIN_VERSION,
'scipy>=%s' % SCIPY_MIN_VERSION,
'traits>=%s' % TRAITS_MIN_VERSION,
'future>=%s' % FUTURE_MIN_VERSION,
'simplejson>=%s' % SIMPLEJSON_MIN_VERSION,
'prov==%s' % PROV_VERSION,
'click>=%s' % CLICK_MIN_VERSION,
'funcsigs',
'pytest>=%s' % PYTEST_MIN_VERSION,
'mock',
'pydotplus',
'pydot>=%s' % PYDOT_MIN_VERSION,
'packaging',
]
if sys.version_info <= (3, 4):
REQUIRES.append('configparser')
TESTS_REQUIRES = [
'pytest-cov',
'codecov'
]
EXTRA_REQUIRES = {
'doc': ['Sphinx>=1.4', 'matplotlib', 'pydotplus', 'pydot>=1.2.3'],
'tests': TESTS_REQUIRES,
'nipy': ['nitime', 'nilearn', 'dipy', 'nipy', 'matplotlib'],
'profiler': ['psutil>=5.0'],
'duecredit': ['duecredit'],
'xvfbwrapper': ['xvfbwrapper'],
'pybids' : ['pybids']
# 'mesh': ['mayavi'] # Enable when it works
}
# Enable a handle to install all extra dependencies at once
EXTRA_REQUIRES['all'] = [val for _, val in list(EXTRA_REQUIRES.items())]
STATUS = 'stable'
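if __name__ == '__main__':
    # Not part of the original module: a quick sanity check that prints the
    # derived metadata fields when this file is run directly (setup.py execs
    # this file instead of importing it).
    print('%s %s (major=%s, minor=%s, micro=%s, isrelease=%s)'
          % (NAME, VERSION, MAJOR, MINOR, MICRO, ISRELEASE))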
| bsd-3-clause |
andaag/scikit-learn | sklearn/cluster/birch.py | 207 | 22706 | # Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
    matrix, instead of constructing a sparse matrix for every row, which is
    expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
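    # CSR layout: the non-zero column indices of row i live in
    # X_indices[X_indptr[i]:X_indptr[i + 1]], with the matching values in X_data.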
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
    3. The properties of the empty subclusters and nodes are updated
       according to which of the two distant subclusters each existing
       subcluster is nearest to.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
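    # The two subclusters that are farthest apart seed the two new nodes;
    # every remaining subcluster is then attached to whichever seed is closer.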
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True, in order to retrieve
        the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids_ and squared_norm_ as views. In this way,
        # updating init_centroids_ and init_sq_norm_ is
        # sufficient.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
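        # ||c_i - x||^2 = ||c_i||^2 - 2 * c_i.x + ||x||^2; the constant
        # ||x||^2 term is dropped because only the argmin over i is needed.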
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# things not too good. we need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
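        # Squared radius of the merged cluster from its sufficient statistics:
        # sq_radius = SS / n - ||centroid||^2, i.e. the mean squared distance
        # of the samples from the merged centroid.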
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
    ends up at the leaf subcluster of the tree that has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor, then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
* Tian Zhang, Raghu Ramakrishnan, Maron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
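        # Insert every sample into the CF tree; whenever the root overflows it
        # is split and a new (non-leaf) root is created above the two halves,
        # which is how the tree grows in height.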
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
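if __name__ == '__main__':
    # Illustrative sketch only (not part of the library): fit Birch on a toy
    # two-blob dataset and inspect the subclusters found by the CF tree.
    # Run as ``python -m sklearn.cluster.birch`` so the relative imports resolve.
    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5.0])
    brc = Birch(threshold=0.8, branching_factor=20, n_clusters=2)
    demo_labels = brc.fit_predict(X_demo)
    print("subcluster centers:", brc.subcluster_centers_.shape)
    print("cluster sizes:", np.bincount(demo_labels))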
| bsd-3-clause |
lekshmideepu/nest-simulator | pynest/examples/mc_neuron.py | 8 | 7424 | # -*- coding: utf-8 -*-
#
# mc_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Multi-compartment neuron example
--------------------------------
Simple example of how to use the three-compartment ``iaf_cond_alpha_mc``
neuron model.
Three stimulation paradigms are illustrated:
- externally applied current, one compartment at a time
- spikes impinging on each compartment, one at a time
- rheobase current injected to soma causing output spikes
Voltage and synaptic conductance traces are shown for all compartments.
"""
##############################################################################
# First, we import all necessary modules to simulate, analyze and plot this
# example.
import nest
import matplotlib.pyplot as plt
nest.ResetKernel()
##############################################################################
# We then extract the receptor types and the list of recordable quantities
# from the neuron model. Together they uniquely identify the target
# compartment and receptor when establishing synaptic connections or
# assigning multimeters.
syns = nest.GetDefaults('iaf_cond_alpha_mc')['receptor_types']
print(f"iaf_cond_alpha_mc receptor_types: {syns}")
rqs = nest.GetDefaults('iaf_cond_alpha_mc')['recordables']
print(f"iaf_cond_alpha_mc recordables : {rqs}")
###############################################################################
# The simulation parameters are assigned to variables.
params = {'V_th': -60.0, # threshold potential
'V_reset': -65.0, # reset potential
't_ref': 10.0, # refractory period
'g_sp': 5.0, # somato-proximal coupling conductance
'soma': {'g_L': 12.0}, # somatic leak conductance
# proximal excitatory and inhibitory synaptic time constants
'proximal': {'tau_syn_ex': 1.0,
'tau_syn_in': 5.0},
'distal': {'C_m': 90.0} # distal capacitance
}
###############################################################################
# The nodes are created using ``Create``. We store the returned handles
# in variables for later reference.
n = nest.Create('iaf_cond_alpha_mc', params=params)
###############################################################################
# A ``multimeter`` is created and connected to the neurons. The parameters
# specified for the multimeter include the list of quantities that should be
# recorded and the time interval at which quantities are measured.
mm = nest.Create('multimeter', params={'record_from': rqs, 'interval': 0.1})
nest.Connect(mm, n)
###############################################################################
# We create one current generator per compartment and configure a stimulus
# regime that drives distal, proximal and soma dendrites, in that order.
# Configuration of the current generator includes the definition of the start
# and stop times and the amplitude of the injected current.
cgs = nest.Create('dc_generator', 3)
cgs[0].set(start=250.0, stop=300.0, amplitude=50.0) # soma
cgs[1].set(start=150.0, stop=200.0, amplitude=-50.0) # proxim.
cgs[2].set(start=50.0, stop=100.0, amplitude=100.0) # distal
###############################################################################
# Generators are then connected to the correct compartments. Specification of
# the ``receptor_type`` uniquely defines the target compartment and receptor.
nest.Connect(cgs[0], n, syn_spec={'receptor_type': syns['soma_curr']})
nest.Connect(cgs[1], n, syn_spec={'receptor_type': syns['proximal_curr']})
nest.Connect(cgs[2], n, syn_spec={'receptor_type': syns['distal_curr']})
###############################################################################
# We create one excitatory and one inhibitory spike generator per compartment
# and configure a regime that drives distal, proximal and soma dendrites, in
# that order, alternating the excitatory and inhibitory spike generators.
sgs = nest.Create('spike_generator', 6)
sgs[0].spike_times = [600.0, 620.0] # soma excitatory
sgs[1].spike_times = [610.0, 630.0] # soma inhibitory
sgs[2].spike_times = [500.0, 520.0] # proximal excitatory
sgs[3].spike_times = [510.0, 530.0] # proximal inhibitory
sgs[4].spike_times = [400.0, 420.0] # distal excitatory
sgs[5].spike_times = [410.0, 430.0] # distal inhibitory
###############################################################################
# Connect generators to correct compartments in the same way as in case of
# current generator
nest.Connect(sgs[0], n, syn_spec={'receptor_type': syns['soma_exc']})
nest.Connect(sgs[1], n, syn_spec={'receptor_type': syns['soma_inh']})
nest.Connect(sgs[2], n, syn_spec={'receptor_type': syns['proximal_exc']})
nest.Connect(sgs[3], n, syn_spec={'receptor_type': syns['proximal_inh']})
nest.Connect(sgs[4], n, syn_spec={'receptor_type': syns['distal_exc']})
nest.Connect(sgs[5], n, syn_spec={'receptor_type': syns['distal_inh']})
###############################################################################
# Run the simulation for 700 ms.
nest.Simulate(700)
###############################################################################
# Now we set the intrinsic current of soma to 150 pA to make the neuron spike.
n.soma = {'I_e': 150.}
###############################################################################
# We simulate the network for another 300 ms and retrieve recorded data from
# the multimeter
nest.Simulate(300)
rec = mm.events
###############################################################################
# We create an array with the time points when the quantities were actually
# recorded
t = rec['times']
###############################################################################
# We plot the time traces of the membrane potential and the state of each
# membrane potential for soma, proximal, and distal dendrites (`V_m.s`, `V_m.p`
# and `V_m.d`).
plt.figure()
plt.subplot(211)
plt.plot(t, rec['V_m.s'], t, rec['V_m.p'], t, rec['V_m.d'])
plt.legend(('Soma', 'Proximal dendrite', 'Distal dendrite'),
loc='lower right')
plt.axis([0, 1000, -76, -59])
plt.ylabel('Membrane potential [mV]')
plt.title('Responses of iaf_cond_alpha_mc neuron')
###############################################################################
# Finally, we plot the time traces of the synaptic conductance measured in
# each compartment.
plt.subplot(212)
plt.plot(t, rec['g_ex.s'], 'b-', t, rec['g_ex.p'], 'g-',
t, rec['g_ex.d'], 'r-')
plt.plot(t, rec['g_in.s'], 'b--', t, rec['g_in.p'], 'g--',
t, rec['g_in.d'], 'r--')
plt.legend(('g_ex.s', 'g_ex.p', 'g_ex.d', 'g_in.s', 'g_in.p', 'g_in.d'))
plt.axis([350, 700, 0, 1.15])
plt.xlabel('Time [ms]')
plt.ylabel('Synaptic conductance [nS]')
plt.show()
| gpl-2.0 |
StuartLittlefair/astropy | astropy/visualization/units.py | 8 | 3945 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
__doctest_skip__ = ['quantity_support']
def quantity_support(format='latex_inline'):
"""
Enable support for plotting `astropy.units.Quantity` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
>>> import matplotlib.pyplot as plt
>>> from astropy import units as u
>>> from astropy import visualization
>>> with visualization.quantity_support():
... plt.figure()
... plt.plot([1, 2, 3] * u.m)
[...]
... plt.plot([101, 125, 150] * u.cm)
[...]
... plt.draw()
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to ``latex_inline``.
"""
from astropy import units as u
# import Angle just so we have a more or less complete list of Quantity
# subclasses loaded - matplotlib needs them all separately!
# NOTE: in matplotlib >=3.2, subclasses will be recognized automatically,
# and once that becomes our minimum version, we can remove this,
# adding just u.Quantity itself to the registry.
from astropy.coordinates import Angle # noqa
from matplotlib import units
from matplotlib import ticker
# Get all subclass for Quantity, since matplotlib checks on class,
# not subclass.
def all_issubclass(cls):
return {cls}.union(
[s for c in cls.__subclasses__() for s in all_issubclass(c)])
def rad_fn(x, pos=None):
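        # Tick formatter for radian axes: express x as the nearest integer
        # multiple n of pi/2 and render it as 0, π/2, π, kπ or kπ/2.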
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return '0'
elif n == 1:
return 'π/2'
elif n == 2:
return 'π'
elif n % 2 == 0:
return f'{n / 2}π'
else:
return f'{n}π/2'
class MplQuantityConverter(units.ConversionInterface):
_all_issubclass_quantity = all_issubclass(u.Quantity)
def __init__(self):
# Keep track of original converter in case the context manager is
# used in a nested way.
self._original_converter = {}
for cls in self._all_issubclass_quantity:
self._original_converter[cls] = units.registry.get(cls)
units.registry[cls] = self
@staticmethod
def axisinfo(unit, axis):
if unit == u.radian:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi/2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.to_string(),
)
elif unit == u.degree:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter('%i°'),
label=unit.to_string(),
)
elif unit is not None:
return units.AxisInfo(label=unit.to_string(format))
return None
@staticmethod
def convert(val, unit, axis):
if isinstance(val, u.Quantity):
return val.to_value(unit)
elif isinstance(val, list) and val and isinstance(val[0], u.Quantity):
return [v.to_value(unit) for v in val]
else:
return val
@staticmethod
def default_units(x, axis):
if hasattr(x, 'unit'):
return x.unit
return None
def __enter__(self):
return self
def __exit__(self, type, value, tb):
for cls in self._all_issubclass_quantity:
if self._original_converter[cls] is None:
del units.registry[cls]
else:
units.registry[cls] = self._original_converter[cls]
return MplQuantityConverter()
| bsd-3-clause |
sourabhdattawad/BuildingMachineLearningSystemsWithPython | ch04/blei_lda.py | 21 | 2601 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
from wordcloud import create_cloud
try:
from gensim import corpora, models, matutils
except:
print("import gensim failed.")
print()
print("Please install it")
raise
import matplotlib.pyplot as plt
import numpy as np
from os import path
NUM_TOPICS = 100
# Check that data exists
if not path.exists('./data/ap/ap.dat'):
print('Error: Expected data to be present at data/ap/')
print('Please cd into ./data & run ./download_ap.sh')
# Load the data
corpus = corpora.BleiCorpus('./data/ap/ap.dat', './data/ap/vocab.txt')
# Build the topic model
model = models.ldamodel.LdaModel(
corpus, num_topics=NUM_TOPICS, id2word=corpus.id2word, alpha=None)
# Iterate over all the topics in the model
with open('topics.txt', 'w') as output:
    for ti in range(model.num_topics):
        words = model.show_topic(ti, 64)
        tf = sum(f for f, w in words)
        # Open the file once, outside the loop; re-opening it with mode 'w'
        # per topic would overwrite the file and keep only the last topic.
        output.write('\n'.join('{}:{}'.format(w, int(1000. * f / tf)) for f, w in words))
        output.write("\n\n\n")
# We first identify the most discussed topic, i.e., the one with the
# highest total weight
topics = matutils.corpus2dense(model[corpus], num_terms=model.num_topics)
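# corpus2dense returns a (num_topics, num_documents) matrix, so summing over
# axis 1 gives the total weight of each topic across the whole corpus.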
weight = topics.sum(1)
max_topic = weight.argmax()
# Get the top 64 words for this topic
# Without the argument, show_topic would return only 10 words
words = model.show_topic(max_topic, 64)
# This function will actually check for the presence of pytagcloud and is otherwise a no-op
create_cloud('cloud_blei_lda.png', words)
num_topics_used = [len(model[doc]) for doc in corpus]
fig,ax = plt.subplots()
ax.hist(num_topics_used, np.arange(42))
ax.set_ylabel('Nr of documents')
ax.set_xlabel('Nr of topics')
fig.tight_layout()
fig.savefig('Figure_04_01.png')
# Now, repeat the same exercise using alpha=1.0
# You can edit the constant below to play around with this parameter
ALPHA = 1.0
model1 = models.ldamodel.LdaModel(
corpus, num_topics=NUM_TOPICS, id2word=corpus.id2word, alpha=ALPHA)
num_topics_used1 = [len(model1[doc]) for doc in corpus]
fig,ax = plt.subplots()
ax.hist([num_topics_used, num_topics_used1], np.arange(42))
ax.set_ylabel('Nr of documents')
ax.set_xlabel('Nr of topics')
# The coordinates below were fit by trial and error to look good
ax.text(9, 223, r'default alpha')
ax.text(26, 156, 'alpha=1.0')
fig.tight_layout()
fig.savefig('Figure_04_02.png')
| mit |
ycaihua/scikit-learn | sklearn/mixture/tests/test_gmm.py | 24 | 12725 | import unittest
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
rng = np.random.RandomState(0)
def test_sample_gaussian():
"""
Test sample generation from mixture.sample_gaussian where covariance
is diagonal, spherical and full
"""
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
"""
test a slow and naive implementation of lmvnpdf and
compare it to the vectorized version (mixture.lmvnpdf) to test
for correctness
"""
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
""" Train on degenerate data with 0 in some dimensions
"""
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
""" Train on 1-D data
"""
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
#X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
"""Test that multiple inits does not much worse than a single one"""
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
"""Test that the right number of parameters is estimated"""
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_aic():
""" Test the aic and bic criteria"""
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
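    # Duplicate (class, cluster) coordinate pairs are summed when the COO
    # matrix is densified, which yields the co-occurrence counts directly.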
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
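    # With a_i the row sums and b_j the column sums of the contingency table:
    #   index     = sum_ij C(n_ij, 2)
    #   expected  = sum_i C(a_i, 2) * sum_j C(b_j, 2) / C(n, 2)
    #   max_index = (sum_i C(a_i, 2) + sum_j C(b_j, 2)) / 2
    #   ARI       = (index - expected) / (max_index - expected)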
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
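# Illustrative sketch, not part of the original module: homogeneity and
# completeness are just the mutual information normalized by each labeling's
# entropy, and the V-measure is their harmonic mean. The labels below are toy
# assumptions; mutual_info_score and entropy are defined later in this module.
def _v_measure_by_hand_example():
    """Recompute the three scores for [0, 0, 1, 1] vs. [0, 0, 1, 2] by hand."""
    labels_true, labels_pred = [0, 0, 1, 1], [0, 0, 1, 2]
    mi = mutual_info_score(labels_true, labels_pred)
    h = mi / entropy(labels_true)     # homogeneity, 1.0 here (no cluster mixes classes)
    c = mi / entropy(labels_pred)     # completeness, ~0.67 (class 1 is split in two)
    v = 2.0 * h * c / (h + c)         # harmonic mean, ~0.8, equals v_measure_score(...)
    return h, c, v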
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous, but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
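# Illustrative sketch, not part of the original module: the vectorized sum
# above is the usual double sum over contingency cells. The toy labels are
# assumptions chosen only so the explicit loops stay short.
def _mi_double_sum_example():
    """Recompute mutual_info_score([0, 0, 1, 1], [0, 0, 1, 2]) with explicit loops."""
    contingency = np.array(contingency_matrix([0, 0, 1, 1], [0, 0, 1, 2]), dtype='float')
    n = contingency.sum()
    mi = 0.0
    for i in range(contingency.shape[0]):
        for j in range(contingency.shape[1]):
            n_ij = contingency[i, j]
            if n_ij == 0.0:
                continue
            p_ij = n_ij / n                                  # joint probability P(i, j)
            p_i = contingency[i, :].sum() / n                # marginal P(i)
            p_j = contingency[:, j].sum() / n                # marginal P'(j)
            mi += p_ij * (np.log(p_ij) - np.log(p_i * p_j))  # P(i,j) * log[P(i,j) / (P(i) P'(j))]
    return mi  # ~0.693 nats, matching mutual_info_score on the same labels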
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labelings) have
an expected AMI around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``.
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
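# Illustrative sketch, not part of the original module: the helper above is the
# plain Shannon entropy of the label frequencies, in nats. The labels below are
# toy assumptions.
def _entropy_by_hand_example():
    """H([0, 0, 1, 1]) = -(0.5*log(0.5) + 0.5*log(0.5)) = log(2) ~= 0.693."""
    counts = np.array([2.0, 2.0])          # label counts for [0, 0, 1, 1]
    p = counts / counts.sum()              # label frequencies
    return float(-np.sum(p * np.log(p)))   # equals entropy([0, 0, 1, 1])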
| bsd-3-clause |
RapidApplicationDevelopment/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py | 75 | 29377 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import batch
from tensorflow.contrib.learn.python.learn.dataframe.transforms import csv_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import example_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import reader_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import sparsify
from tensorflow.contrib.learn.python.learn.dataframe.transforms import split_mask
from tensorflow.python.client import session as sess
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner as qr
def _expand_file_names(filepatterns):
"""Takes a list of file patterns and returns a list of resolved file names."""
if not isinstance(filepatterns, (list, tuple, set)):
filepatterns = [filepatterns]
filenames = set()
for filepattern in filepatterns:
names = set(gfile.Glob(filepattern))
filenames |= names
return list(filenames)
def _dtype_to_nan(dtype):
if dtype is dtypes.string:
return b""
elif dtype.is_integer:
return np.nan
elif dtype.is_floating:
return np.nan
elif dtype is dtypes.bool:
return np.nan
else:
raise ValueError("Can't parse type without NaN into sparse tensor: %s" %
dtype)
def _get_default_value(feature_spec):
if isinstance(feature_spec, parsing_ops.FixedLenFeature):
return feature_spec.default_value
else:
return _dtype_to_nan(feature_spec.dtype)
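# Illustrative sketch, not part of the original module: how the helpers above
# resolve per-column defaults from a feature spec. The column names and dtypes
# are assumptions made only for this example.
def _example_default_values():
    """Fixed-length features keep their declared default; var-length ones fall back to a sentinel."""
    feature_spec = {
        "age": parsing_ops.FixedLenFeature([], dtype=dtypes.int64, default_value=0),
        "tags": parsing_ops.VarLenFeature(dtype=dtypes.string),
    }
    # -> [0, b""]; the VarLenFeature default comes from _dtype_to_nan(dtypes.string)
    return [_get_default_value(feature_spec[name]) for name in ("age", "tags")]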
class TensorFlowDataFrame(df.DataFrame):
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
def run(self,
num_batches=None,
graph=None,
session=None,
start_queues=True,
initialize_variables=True,
**kwargs):
"""Builds and runs the columns of the `DataFrame` and yields batches.
This is a generator that yields a dictionary mapping column names to
evaluated columns.
Args:
num_batches: the maximum number of batches to produce. If none specified,
the returned value will iterate through infinite batches.
graph: the `Graph` in which the `DataFrame` should be built.
session: the `Session` in which to run the columns of the `DataFrame`.
start_queues: if true, queues will be started before running and halted
after producing `num_batches` batches.
initialize_variables: if true, variables will be initialized.
**kwargs: Additional keyword arguments e.g. `num_epochs`.
Yields:
A dictionary, mapping column names to the values resulting from running
each column for a single batch.
"""
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
if session is None:
session = sess.Session()
self_built = self.build(**kwargs)
keys = list(self_built.keys())
cols = list(self_built.values())
if initialize_variables:
if variables.local_variables():
session.run(variables.local_variables_initializer())
if variables.global_variables():
session.run(variables.global_variables_initializer())
if start_queues:
coord = coordinator.Coordinator()
threads = qr.start_queue_runners(sess=session, coord=coord)
i = 0
while num_batches is None or i < num_batches:
i += 1
try:
values = session.run(cols)
yield collections.OrderedDict(zip(keys, values))
except errors.OutOfRangeError:
break
if start_queues:
coord.request_stop()
coord.join(threads)
def select_rows(self, boolean_series):
"""Returns a `DataFrame` with only the rows indicated by `boolean_series`.
Note that batches may no longer have consistent size after calling
`select_rows`, so the new `DataFrame` may need to be rebatched.
For example:
'''
filtered_df = df.select_rows(df["country"] == "jp").batch(64)
'''
Args:
boolean_series: a `Series` that evaluates to a boolean `Tensor`.
Returns:
A new `DataFrame` with the same columns as `self`, but selecting only the
rows where `boolean_series` evaluated to `True`.
"""
result = type(self)()
for key, col in self._columns.items():
try:
result[key] = col.select_rows(boolean_series)
except AttributeError as e:
raise NotImplementedError((
"The select_rows method is not implemented for Series type {}. "
"Original error: {}").format(type(col), e))
return result
def split(self, index_series, proportion, batch_size=None):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
left_rows = self.select_rows(left_mask)
right_rows = self.select_rows(right_mask)
if batch_size:
left_rows = left_rows.batch(batch_size=batch_size, shuffle=False)
right_rows = right_rows.batch(batch_size=batch_size, shuffle=False)
return left_rows, right_rows
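# Illustrative usage (comment only; the column name and sizes are assumptions
# made for clarity, not part of the original module):
#   train_df, eval_df = df.split("example_id", proportion=0.8, batch_size=64)
# The hash of each "example_id" string decides which side a row lands on, so
# the same rows end up in the same partition across runs of the same binary.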
def split_fast(self, index_series, proportion, batch_size,
base_batch_size=1000):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
base_batch_size: the batch size to use for materialized data, prior to the
split.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
self["left_mask__"] = left_mask
self["right_mask__"] = right_mask
# TODO(soergel): instead of base_batch_size can we just do one big batch?
# avoid computing the hashes twice
m = self.materialize_to_memory(batch_size=base_batch_size)
left_rows_df = m.select_rows(m["left_mask__"])
right_rows_df = m.select_rows(m["right_mask__"])
del left_rows_df[["left_mask__", "right_mask__"]]
del right_rows_df[["left_mask__", "right_mask__"]]
# avoid recomputing the split repeatedly
left_rows_df = left_rows_df.materialize_to_memory(batch_size=batch_size)
right_rows_df = right_rows_df.materialize_to_memory(batch_size=batch_size)
return left_rows_df, right_rows_df
def run_one_batch(self):
"""Creates a new 'Graph` and `Session` and runs a single batch.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
batch of the `DataFrame`.
"""
return list(self.run(num_batches=1))[0]
def run_one_epoch(self):
"""Creates a new 'Graph` and `Session` and runs a single epoch.
Naturally this makes sense only for DataFrames that fit in memory.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
epoch of the `DataFrame`.
"""
# batches is a list of dicts of numpy arrays
batches = [b for b in self.run(num_epochs=1)]
# first invert that to make a dict of lists of numpy arrays
pivoted_batches = {}
for k in batches[0].keys():
pivoted_batches[k] = []
for b in batches:
for k, v in b.items():
pivoted_batches[k].append(v)
# then concat the arrays in each column
result = {k: np.concatenate(column_batches)
for k, column_batches in pivoted_batches.items()}
return result
def materialize_to_memory(self, batch_size):
unordered_dict_of_arrays = self.run_one_epoch()
# there may already be an 'index' column, in which case from_ordereddict()
# below will complain because it wants to generate a new one.
# for now, just remove it.
# TODO(soergel): preserve index history, potentially many levels deep
del unordered_dict_of_arrays["index"]
# the order of the columns in this dict is arbitrary; we just need it to
# remain consistent.
ordered_dict_of_arrays = collections.OrderedDict(unordered_dict_of_arrays)
return TensorFlowDataFrame.from_ordereddict(ordered_dict_of_arrays,
batch_size=batch_size)
def batch(self,
batch_size,
shuffle=False,
num_threads=1,
queue_capacity=None,
min_after_dequeue=None,
seed=None):
"""Resize the batches in the `DataFrame` to the given `batch_size`.
Args:
batch_size: desired batch size.
shuffle: whether records should be shuffled. Defaults to False.
num_threads: the number of enqueueing threads.
queue_capacity: capacity of the queue that will hold new batches.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` with `batch_size` rows.
"""
column_names = list(self._columns.keys())
if shuffle:
batcher = batch.ShuffleBatch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
seed=seed)
else:
batcher = batch.Batch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity)
batched_series = batcher(list(self._columns.values()))
dataframe = type(self)()
dataframe.assign(**(dict(zip(column_names, batched_series))))
return dataframe
@classmethod
def _from_csv_base(cls, filepatterns, get_default_values, has_header,
column_names, num_threads, enqueue_size,
batch_size, queue_capacity, min_after_dequeue, shuffle,
seed):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
get_default_values: a function that produces a list of default values for
each column, given the column names.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if column_names is None:
if not has_header:
raise ValueError("If column_names is None, has_header must be true.")
with gfile.GFile(filenames[0]) as f:
column_names = csv.DictReader(f).fieldnames
if "index" in column_names:
raise ValueError(
"'index' is reserved and can not be used for a column name.")
default_values = get_default_values(column_names)
reader_kwargs = {"skip_header_lines": (1 if has_header else 0)}
index, value = reader_source.TextFileSource(
filenames,
reader_kwargs=reader_kwargs,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = csv_parser.CSVParser(column_names, default_values)
parsed = parser(value)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_csv(cls,
filepatterns,
default_values,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
default_values: a list of default values for each column.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
# pylint: disable=unused-argument
return default_values
return cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
@classmethod
def from_csv_with_feature_spec(cls,
filepatterns,
feature_spec,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files, given a feature_spec.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
feature_spec: a dict mapping column names to `FixedLenFeature` or
`VarLenFeature`.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
return [_get_default_value(feature_spec[name]) for name in column_names]
dataframe = cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
# replace the dense columns with sparse ones in place in the dataframe
for name in dataframe.columns():
if name != "index" and isinstance(feature_spec[name],
parsing_ops.VarLenFeature):
strip_value = _get_default_value(feature_spec[name])
(dataframe[name],) = sparsify.Sparsify(strip_value)(dataframe[name])
return dataframe
@classmethod
def from_examples(cls,
filepatterns,
features,
reader_cls=io_ops.TFRecordReader,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from `tensorflow.Example`s.
Args:
filepatterns: a list of file patterns containing `tensorflow.Example`s.
features: a dict mapping feature names to `VarLenFeature` or
`FixedLenFeature`.
reader_cls: a subclass of `tensorflow.ReaderBase` that will be used to
read the `Example`s.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with `Example`s from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if "index" in features:
raise ValueError(
"'index' is reserved and can not be used for a feature name.")
index, record = reader_source.ReaderSource(
reader_cls,
filenames,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = example_parser.ExampleParser(features)
parsed = parser(record)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_pandas(cls,
pandas_dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="pandas_data"):
"""Create a `tf.learn.DataFrame` from a `pandas.DataFrame`.
Args:
pandas_dataframe: `pandas.DataFrame` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
`pandas_dataframe`.
"""
pandas_source = in_memory_source.PandasSource(
pandas_dataframe,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(pandas_source()._asdict()))
return dataframe
@classmethod
def from_numpy(cls,
numpy_array,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from a `numpy.ndarray`.
The returned `DataFrame` contains two columns: 'index' and 'value'. The
'value' column contains a row from the array. The 'index' column contains
the corresponding row number.
Args:
numpy_array: `numpy.ndarray` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
array.
"""
numpy_source = in_memory_source.NumpySource(
numpy_array,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
@classmethod
def from_ordereddict(cls,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from an `OrderedDict` of `numpy.ndarray`.
The returned `DataFrame` contains a column for each key of the dict plus an
extra 'index' column. The 'index' column contains the row number. Each of
the other columns contains a row from the corresponding array.
Args:
ordered_dict_of_arrays: `OrderedDict` of `numpy.ndarray` that serves as a
data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given arrays.
Raises:
ValueError: `ordered_dict_of_arrays` contains the reserved name 'index'.
"""
numpy_source = in_memory_source.OrderedDictNumpySource(
ordered_dict_of_arrays,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
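# Illustrative sketch, not part of the original module: a typical round trip
# through the class above. The file pattern, per-column defaults and batch
# size are assumptions made only for this example.
def _example_csv_round_trip():
    """Build a DataFrame from CSV files and pull one batch of numpy arrays."""
    df_csv = TensorFlowDataFrame.from_csv(
        ["my_data/*.csv"],              # hypothetical file pattern
        default_values=[0.0, 0.0, ""],  # one default per CSV column
        batch_size=32,
        shuffle=False)
    return df_csv.run_one_batch()       # dict mapping column name -> numpy array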
| apache-2.0 |
marianotepper/nmu_rfit | rnmu/pme/vanishing.py | 1 | 2695 | import numpy as np
import matplotlib.pyplot as plt
class VanishingPoint():
def __init__(self, data=None):
self.point = None
if data is not None:
self.fit(data)
@property
def min_sample_size(self):
return 2
def fit(self, data):
if len(data) < self.min_sample_size:
raise ValueError('At least two segments are needed to fit a VP')
if len(data) == self.min_sample_size:
self.point = np.cross(data[0].line, data[1].line)
self.point /= self.point[2]
else:
lines = np.array([seg.line for seg in data])
sol = np.linalg.lstsq(lines[:, :2], -lines[:, 2])
point_plane = np.append(sol[0], [1])
dists_plane = distances(point_plane, data)
angles = _normalize(np.arctan2(lines[:, 0], lines[:, 1]))
alpha = np.mean(angles)
point_inf = np.array([np.cos(alpha), np.sin(alpha), 0])
dists_inf = distances(point_inf, data)
if np.max(np.abs(dists_plane)) < np.max(np.abs(dists_inf)):
self.point = point_plane
else:
self.point = point_inf
def distances(self, data):
return distances(self.point, data)
def plot(self, **kwargs):
plt.scatter(self.point[0], self.point[1], **kwargs)
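# Illustrative sketch, not part of the original module: VanishingPoint only
# needs segment objects exposing a homogeneous ``line`` and endpoints ``p_a``
# and ``p_b``. The tiny stand-in below is an assumption made for the example;
# the real project supplies its own segment type.
class _ToySegment(object):
    def __init__(self, p_a, p_b):
        self.p_a = np.asarray(p_a, dtype=float)   # homogeneous endpoint [x, y, 1]
        self.p_b = np.asarray(p_b, dtype=float)
        self.line = np.cross(self.p_a, self.p_b)  # homogeneous line through both endpoints
def _example_vanishing_point():
    """Two converging segments: their support lines meet at the finite VP (1, 1)."""
    segments = [_ToySegment([0., 0., 1.], [1., 1., 1.]),   # along y = x
                _ToySegment([0., 1., 1.], [2., 1., 1.])]   # along y = 1
    vp = VanishingPoint(data=segments)
    return vp.point                                        # [1., 1., 1.] in homogeneous coords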
def _normalize(a):
return np.fmod(a + np.pi, np.pi)
def basis_vector(segment):
u = segment.p_a - segment.p_b
u /= np.linalg.norm(u)
return np.atleast_2d(u[:2])
def distances(point, data):
lines = np.array([seg.line for seg in data])
if point[2] != 0:
points_a, points_b = zip(*[(s.p_a, s.p_b) for s in data])
points_a = np.array(points_a)
points_b = np.array(points_b)
d_a = np.linalg.norm(points_a - point, axis=1)
d_b = np.linalg.norm(points_b - point, axis=1)
closer_points = np.copy(points_a)
closer_points[d_a > d_b] = points_b[d_a > d_b]
midpoints = (points_a + points_b) / 2
v1 = point - midpoints
v2 = closer_points - midpoints
diff = np.sum(v1 * v2, axis=1) # dot product
diff /= (np.linalg.norm(v1, axis=1) * np.linalg.norm(v2, axis=1))
angle_diff = np.arccos(diff) * np.sign(np.cross(v1, v2)[:, 2])
else:
angle_vp = _normalize(np.arctan2(point[1], point[0]))
angle_lines = _normalize(np.arctan2(lines[:, 0], lines[:, 1]))
angle_diff = angle_vp - angle_lines
mask = angle_diff > np.pi / 2
angle_diff[mask] = np.pi - angle_diff[mask]
mask = angle_diff < -np.pi / 2
angle_diff[mask] = np.pi + angle_diff[mask]
return np.abs(angle_diff) | bsd-3-clause |
exa-analytics/exatomic | exatomic/core/atom.py | 2 | 22735 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Atomic Position Data
############################
This module provides a collection of dataframes supporting nuclear positions,
forces, velocities, symbols, etc. (all data associated with atoms as points).
"""
from numbers import Integral
import numpy as np
import pandas as pd
from exa import DataFrame, Series
from exa.util.units import Length
from exatomic.base import sym2z, sym2mass
from exatomic.algorithms.distance import modv
from exatomic.core.error import PeriodicUniverseError
from exatomic.algorithms.geometry import make_small_molecule
from exatomic import plotter
class Atom(DataFrame):
"""
The atom dataframe.
+-------------------+----------+-------------------------------------------+
| Column | Type | Description |
+===================+==========+===========================================+
| x | float | position in x (req.) |
+-------------------+----------+-------------------------------------------+
| y | float | position in y (req.) |
+-------------------+----------+-------------------------------------------+
| z | float | position in z (req.) |
+-------------------+----------+-------------------------------------------+
| frame | category | non-unique integer (req.) |
+-------------------+----------+-------------------------------------------+
| symbol | category | element symbol (req.) |
+-------------------+----------+-------------------------------------------+
| fx | float | force in x |
+-------------------+----------+-------------------------------------------+
| fy | float | force in y |
+-------------------+----------+-------------------------------------------+
| fz | float | force in z |
+-------------------+----------+-------------------------------------------+
| vx | float | velocity in x |
+-------------------+----------+-------------------------------------------+
| vy | float | velocity in y |
+-------------------+----------+-------------------------------------------+
| vz | float | velocity in z |
+-------------------+----------+-------------------------------------------+
"""
_index = 'atom'
_cardinal = ('frame', np.int64)
_categories = {'symbol': str, 'set': np.int64, 'molecule': np.int64,
'label': np.int64}
_columns = ['x', 'y', 'z', 'symbol']
#@property
#def _constructor(self):
# return Atom
@property
def nframes(self):
"""Return the total number of frames in the atom table."""
return np.int64(self.frame.cat.as_ordered().max() + 1)
@property
def last_frame(self):
"""Return the last frame of the atom table."""
return self[self.frame == self.nframes - 1]
@property
def unique_atoms(self):
"""Return unique atom symbols of the last frame."""
return self.last_frame.symbol.unique()
@staticmethod
def _determine_center(attr, coords):
"""Determine the center of the molecule with respect to
the given attribute data. Used for the center of nuclear
charge and center of mass."""
center = 1/np.sum(attr)*np.sum(np.multiply(np.transpose(coords), attr), axis=1)
center = pd.Series(center, index=['x', 'y', 'z'])
return center
def center(self, idx=None, frame=None, to=None):
"""
Return a copy of a single frame of the atom table
centered around a specific atom index. There is also
the ability to center the molecule to the center of
nuclear charge (NuclChrg) or center of mass (Mass).
Args:
idx (int): Atom index in the atom table
frame (int): Frame to perform the operation on
to (str): Tells the program which centering algorithm to use
Returns:
frame (:class:`exatomic.Universe.atom`): Atom frame
"""
if frame is None: frame = self.last_frame.copy()
else: frame = self[self.frame == frame].copy()
if to is None:
if idx is None: raise TypeError("Must provide an atom to center to")
center = frame.iloc[idx]
elif to == 'NuclChrg':
try:
Z = frame['Z'].astype(int).values
except KeyError:
Z = frame['symbol'].map(sym2z).astype(int).values
center = self._determine_center(attr=Z, coords=frame[['x', 'y', 'z']].values)
elif to == 'Mass':
mass = frame['symbol'].map(sym2mass).astype(int).values
center = self._determine_center(attr=mass, coords=frame[['x', 'y', 'z']].values)
else:
raise NotImplementedError("Sorry the centering option {} is not available".format(to))
for r in ['x', 'y', 'z']:
if center[r] > 0: frame[r] = frame[r] - center[r]
else: frame[r] = frame[r] + np.abs(center[r])
return Atom(frame)
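# Illustrative usage (comment only; ``uni`` is a hypothetical universe object,
# an assumption made for clarity, not part of the original module):
#   centered = uni.atom.center(idx=0)        # put atom 0 at the origin
#   centered = uni.atom.center(to='Mass')    # or shift the center of mass to the origin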
def rotate(self, theta, axis=None, frame=None, degrees=True):
"""
Return a copy of a single frame of the atom table rotated
around the specified rotation axis by the specified angle.
Given the rotation axis and the rotation angle we can
use Rodrigues' rotation formula to get the rotated
vectors.
Args:
theta (float): The angle that you wish to rotate by
axis (list): The axis of rotation
frame (int): The frame that you wish to rotate
degrees (bool): If true convert from degrees to radians
Returns:
frame (:class:`exatomic.Universe.atom`): Atom frame
"""
if axis is None: axis = [0, 0, 1]
if frame is None: frame = self.last_frame.copy()
else: frame = self[self.frame == frame].copy()
if all(map(lambda x: x == 0., axis)) or theta == 0.: return frame
# as we have the rotation axis and the angle we will rotate over
# we implement the Rodrigues' rotation formula
# v_rot = v*np.cos(theta) + (np.cross(k,v))*np.sin(theta) + k*(np.dot(k,v))*(1-np.cos(theta))
# convert units if not degrees
if degrees: theta = theta*np.pi/180.
# normalize rotation axis vector
norm = np.linalg.norm(axis)
try:
axis /= norm
except ZeroDivisionError:
raise ZeroDivisionError("Trying to normalize axis {} by a 0 value".format(axis))
# get the coordinates
coords = frame[['x', 'y', 'z']].values
# generate the first term in rodrigues formula
a = coords * np.cos(theta)
# generate the second term in rodrigues formula
# this creates a matrix of size coords.shape[0]
b = np.cross(axis, coords) * np.sin(theta)
# generate the last term in rodrigues formula
# we use np.outer to make a dyadic product of the result from the dot product vector
# and the axis vector
c = np.outer(np.dot(coords, axis), axis) * (1-np.cos(theta))
rotated = a + b + c
frame[['x', 'y', 'z']] = rotated
return Atom(frame)
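# Illustrative worked example (comment only, an assumption added for clarity):
# rotating the point (1, 0, 0) by 90 degrees about the z axis with the three
# Rodrigues terms above gives
#   a = (1, 0, 0) * cos(90)                                    = (0, 0, 0)
#   b = cross([0, 0, 1], (1, 0, 0)) * sin(90)                  = (0, 1, 0)
#   c = outer(dot((1, 0, 0), [0, 0, 1]), [0, 0, 1]) * (1 - 0)  = (0, 0, 0)
# so the rotated coordinate is (0, 1, 0), as expected.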
def translate(self, dx=0, dy=0, dz=0, vector=None, frame=None, units='au'):
"""
Return a copy of a single frame of the atom table translated by
some specified distance.
Note:
Vector can be used instead of dx, dy, dz as it will be decomposed
into those components. If vector and any of the others are
specified the values in vector will be used.
Args:
dx (float): Displacement distance in x
dy (float): Displacement distance in y
dz (float): Displacement distance in z
vector (list): Displacement vector
units (str): Units that are used for the displacement
Returns:
frame (:class:`exatomic.Universe.atom`): Atom frame
"""
if frame is None: frame = self.last_frame.copy()
else: frame = self[self.frame == frame].copy()
# check if vector is specified
if vector is not None:
# convert vector units to au
vector = [i * Length[units, 'au'] for i in vector]
dx = vector[0]
dy = vector[1]
dz = vector[2]
# add the values to each respective coordinate
frame['x'] += dx
frame['y'] += dy
frame['z'] += dz
return Atom(frame)
def align_to_axis(self, adx0, adx1, axis=None, frame=None, center_to=None):
'''
This is a short method to center and align the molecule along some defined axis.
Args:
adx0 (int): Atom to place at the origin
adx1 (int): Atom to align along the axis
axis (list): Axis that the vector adx0-adx1 will align to
frame (int): Frame to align
Returns:
aligned (:class:`exatomic.Universe.atom`): Aligned atom frame
'''
if frame is None: atom = self.last_frame.copy()
else: atom = self[self.frame == frame].copy()
cols = ['x', 'y', 'z']
# define the original vector
v0 = atom.iloc[adx1][cols].values.astype(np.float64) - atom.iloc[adx0][cols].values.astype(np.float64)
# get the vector to align with and normalize
v1 = axis/np.linalg.norm(axis)
# find the normal vector to rotate around
n = np.cross(v0, v1)
# find the angle to rotate the vector
theta = np.arccos(np.dot(v0, v1) / (np.linalg.norm(v0)*np.linalg.norm(v1)))
# use the center method to center the molecule
centered = Atom(atom).center(adx0, frame=frame, to=center_to)
# rotate the molecule around the normal vector
aligned = centered.rotate(theta=theta, axis=n, degrees=False)
return Atom(aligned)
def to_xyz(self, tag='symbol', header=False, comments='', columns=None,
frame=None, units='Angstrom'):
"""
Return atomic data in XYZ format, by default without the first 2 lines.
If multiple frames are specified, return an XYZ trajectory format. If
frame is not specified, by default returns the last frame in the table.
Args:
tag (str): column name to use in place of 'symbol'
header (bool): if True, return the first 2 lines of XYZ format
comment (str, list): comment(s) to put in the comment line
frame (int, iter): frame or frames to return
units (str): units (default angstroms)
Returns:
ret (str): XYZ formatted atomic data
"""
# TODO :: this is conceptually a duplicate of XYZ.from_universe
columns = (tag, 'x', 'y', 'z') if columns is None else columns
frame = self.nframes - 1 if frame is None else frame
if isinstance(frame, Integral): frame = [frame]
if not isinstance(comments, list): comments = [comments]
if len(comments) == 1: comments = comments * len(frame)
df = self[self['frame'].isin(frame)].copy()
if tag not in df.columns:
if tag == 'Z':
stoz = sym2z()
df[tag] = df['symbol'].map(stoz)
df['x'] *= Length['au', units]
df['y'] *= Length['au', units]
df['z'] *= Length['au', units]
grps = df.groupby('frame')
ret = ''
formatter = {tag: '{:<5}'.format}
stargs = {'columns': columns, 'header': False,
'index': False, 'formatters': formatter}
t = 0
for _, grp in grps:
if not len(grp): continue
tru = (header or comments[t] or len(frame) > 1)
hdr = '\n'.join([str(len(grp)), comments[t], '']) if tru else ''
ret = ''.join([ret, hdr, grp.to_string(**stargs), '\n'])
t += 1
return ret
def get_element_masses(self):
"""Compute and return element masses from symbols."""
return self['symbol'].astype('O').map(sym2mass)
def get_atom_labels(self):
"""
Compute and return enumerated atoms.
Returns:
labels (:class:`~exa.core.numerical.Series`): Enumerated atom labels (of type int)
"""
nats = self.cardinal_groupby().size().values
labels = Series([i for nat in nats for i in range(nat)], dtype='category')
labels.index = self.index
return labels
@classmethod
def from_small_molecule_data(cls, center=None, ligand=None, distance=None, geometry=None,
offset=None, plane=None, axis=None, domains=None, unit='Angstrom'):
'''
A minimal molecule builder for simple one-center, homogeneous ligand
molecules of various general chemistry molecular geometries. If domains
is not specified and geometry is ambiguous (like 'bent'),
it just guesses the simplest geometry (smallest number of domains).
Args
center (str): atomic symbol of central atom
ligand (str): atomic symbol of ligand atoms
distance (float): distance between central atom and any ligand
geometry (str): molecular geometry
domains (int): number of electronic domains
offset (np.array): 3-array of position of central atom
plane (str): cartesian plane of molecule (eg. for 'square_planar')
axis (str): cartesian axis of molecule (eg. for 'linear')
Returns
exatomic.atom.Atom: Atom table of small molecule
'''
return cls(make_small_molecule(center=center, ligand=ligand, distance=distance,
geometry=geometry, offset=offset, plane=plane,
axis=axis, domains=domains, unit=unit))
class UnitAtom(DataFrame):
"""
In unit cell coordinates (sparse) for periodic systems. These coordinates
are used to update the corresponding :class:`~exatomic.atom.Atom` object
"""
_index = 'atom'
_columns = ['x', 'y', 'z']
#@property
#def _constructor(self):
# return UnitAtom
@classmethod
def from_universe(cls, universe):
if universe.periodic:
if "rx" not in universe.frame.columns:
universe.frame.compute_cell_magnitudes()
a, b, c = universe.frame[["rx", "ry", "rz"]].max().values
x = modv(universe.atom['x'].values, a)
y = modv(universe.atom['y'].values, b)
z = modv(universe.atom['z'].values, c)
df = pd.DataFrame.from_dict({'x': x, 'y': y, 'z': z})
df.index = universe.atom.index
return cls(df[universe.atom[['x', 'y', 'z']] != df])
raise PeriodicUniverseError()
class ProjectedAtom(DataFrame):
"""
Projected atom coordinates (e.g. on 3x3x3 supercell). These coordinates are
typically associated with their corresponding indices in another dataframe.
Note:
This table is computed when periodic two body properties are computed;
it doesn't have meaning outside of that context.
See Also:
:func:`~exatomic.two.compute_periodic_two`.
"""
_index = 'two'
_columns = ['x', 'y', 'z']
#@property
#def _constructor(self):
# return ProjectedAtom
class VisualAtom(DataFrame):
"""
"""
_index = 'atom'
_columns = ['x', 'y', 'z']
@classmethod
def from_universe(cls, universe):
"""
"""
if universe.frame.is_periodic():
atom = universe.atom[['x', 'y', 'z']].copy()
atom.update(universe.unit_atom)
bonded = universe.atom_two.loc[universe.atom_two['bond'] == True, 'atom1'].astype(np.int64)
prjd = universe.projected_atom.loc[bonded.index].to_dense()
prjd['atom'] = bonded
prjd.drop_duplicates('atom', inplace=True)
prjd.set_index('atom', inplace=True)
atom.update(prjd)
return cls(atom[atom != universe.atom[['x', 'y', 'z']]])
raise PeriodicUniverseError()
#@property
#def _constructor(self):
# return VisualAtom
class Frequency(DataFrame):
"""
The Frequency dataframe.
+-------------------+----------+-------------------------------------------+
| Column | Type | Description |
+===================+==========+===========================================+
| frame | category | non-unique integer (req.) |
+-------------------+----------+-------------------------------------------+
| frequency | float | frequency of oscillation (cm-1) (req.) |
+-------------------+----------+-------------------------------------------+
| freqdx | int | index of frequency of oscillation (req.) |
+-------------------+----------+-------------------------------------------+
| dx | float | atomic displacement in x direction (req.) |
+-------------------+----------+-------------------------------------------+
| dy | float | atomic displacement in y direction (req.) |
+-------------------+----------+-------------------------------------------+
| dz | float | atomic displacement in z direction (req.) |
+-------------------+----------+-------------------------------------------+
| ir_int | float | ir intensity of the vibrational mode |
+-------------------+----------+-------------------------------------------+
| symbol | str | atomic symbol (req.) |
+-------------------+----------+-------------------------------------------+
| label | int | atomic identifier |
+-------------------+----------+-------------------------------------------+
"""
_index = 'frequency'
_cardinal = ('frame', np.int64)
_categories = {'symbol': str, 'label': np.int64}
_columns = ['dx', 'dy', 'dz', 'symbol', 'frequency', 'freqdx', 'ir_int']
#@property
#def _constructor(self):
# return Frequency
def displacement(self, freqdx):
return self[self['freqdx'] == freqdx][['dx', 'dy', 'dz', 'symbol']]
def ir_spectra(self, fwhm=15, lineshape='gaussian', xrange=None, res=None, invert_x=False, **kwargs):
'''
Generate an IR spectrum with the plotter classes. Either a Gaussian or a Lorentzian
lineshape function can be selected. For the most part, all of the kwargs are passed
directly into the plotter.Plot class.
Args:
fwhm (float): Full-width at half-maximum
lineshape (str): Switch between the different lineshape functions available
xrange (list): X-bounds for the plot
res (float): Resolution for the plot line
invert_x (bool): Invert x-axis
'''
# define the lineshape and store the function call in the line variable
try:
line = getattr(plotter, lineshape)
except AttributeError:
raise NotImplementedError("Sorry we have not yet implemented the lineshape {}.".format(lineshape))
# define a default parameter for the plot width
# we did this for a full-screen jupyter notebook on a 1920x1080 monitor
if not "plot_width" in kwargs:
kwargs.update(plot_width=900)
# define xbounds
xrange = [0, 4000] if xrange is None else xrange
# deal with inverted bounds
if xrange[0] > xrange[1]:
xrange = sorted(xrange)
invert_x = True
# define the resolution
res = fwhm/50 if res is None else res
# define the class
plot = plotter.Plot(**kwargs)
# this is designed for a single frame
if self['frame'].unique().shape[0] != 1:
raise NotImplementedError("We have not yet expanded to include multiple frames")
# grab the locations of the peaks between the bounds
freqdx = self['freqdx'].drop_duplicates().index
freq = self.loc[freqdx, 'frequency']
freq = freq[freq.between(*xrange)]
# grab the ir intensity data
# we use the frequency indexes instead of drop duplicates as we may have similar intensities
inten = self.loc[freq.index, 'ir_int'].astype(np.float64).values
# change to using the values instead as we no longer need the index data
# we could also use jit for the lineshape functions as we only deal with numpy arrays
freq = freq.values
x_data = np.arange(*xrange, res)
# get the y data by calling the lineshape function generator
y_data = line(freq=freq, x=x_data, fwhm=fwhm, inten=inten)
# plot the lineshape data
plot.fig.line(x_data, y_data)
# plot the points on the plot to show where the frequency values are
# more useful when we have nearly degenerate vibrations
plot.fig.scatter(freq, line(freq=freq, x=freq, fwhm=fwhm, inten=inten))
if invert_x:
plot.set_xrange(xmin=xrange[1], xmax=xrange[0])
else:
plot.set_xrange(xmin=xrange[0], xmax=xrange[1])
# display the figure with our generated method
plot.show()
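# Editor's sketch (not part of exatomic): what the `line(...)` lineshape call above is
# assumed to compute for a Gaussian profile - one peak per vibrational mode, centered
# at its frequency, scaled by its IR intensity, and summed over modes.
def _gaussian_lineshape_sketch(freq, x, fwhm, inten):
    import numpy as np
    sigma = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))  # convert FWHM to standard deviation
    x = np.asarray(x, dtype=float)[:, None]
    freq = np.asarray(freq, dtype=float)[None, :]
    inten = np.asarray(inten, dtype=float)[None, :]
    # broadcast to (n_points, n_modes), then sum the per-mode Gaussians
    return (inten * np.exp(-0.5 * ((x - freq) / sigma) ** 2)).sum(axis=1)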
def add_vibrational_mode(uni, freqdx):
displacements = uni.frequency.displacement(freqdx)  # Frequency.displacement is defined above
if not all(displacements['symbol'] == uni.atom['symbol']):
print('Mismatch in ordering of atoms and frequencies.')
return
displaced = []
frames = []
# Should these only be absolute values?
factor = np.abs(np.sin(np.linspace(-4*np.pi, 4*np.pi, 200)))
for fac in factor:
moved = uni.atom.copy()
moved['x'] += displacements['dx'].values * fac
moved['y'] += displacements['dy'].values * fac
moved['z'] += displacements['dz'].values * fac
displaced.append(moved)
frames.append(uni.frame)
movie = pd.concat(displaced).reset_index()
movie['frame'] = np.repeat(range(len(factor)), len(uni.atom))
uni.frame = pd.concat(frames).reset_index()
uni.atom = movie
| apache-2.0 |
tkphd/pycalphad | pycalphad/plot/utils.py | 1 | 1820 | """
The plot utils module contains some useful routines related to plotting.
"""
import matplotlib.patches as mpatches
import numpy as np
def phase_legend(phases):
"""
Build matplotlib handles for the plot legend.
Parameters
----------
phases : list
Names of the phases.
Returns
-------
A tuple containing:
(1) A list of matplotlib handle objects
(2) A dict mapping phase names to their RGB color on the plot
Examples
--------
>>> legend_handles, colors = phase_legend(['FCC_A1', 'BCC_A2', 'LIQUID'])
"""
colorlist = {}
# colors from Junwei Huang, March 21 2013
# exclude green and red because of their special meaning on the diagram
colorvalues = ["0000FF", "FFFF00", "FF00FF", "00FFFF", "000000", "800000",
"008000", "000080", "808000", "800080", "008080", "808080",
"C00000", "00C000", "0000C0", "C0C000", "C000C0", "00C0C0",
"C0C0C0", "400000", "004000", "000040", "404000", "400040",
"004040", "404040", "200000", "002000", "000020", "202000",
"200020", "002020", "202020", "600000", "006000", "000060",
"606000", "600060", "006060", "606060", "A00000", "00A000",
"0000A0", "A0A000", "A000A0", "00A0A0", "A0A0A0", "E00000",
"00E000", "0000E0", "E0E000", "E000E0", "00E0E0", "E0E0E0"]
mxx = len(colorvalues)
phasecount = 0
legend_handles = []
for phase in phases:
phase = phase.upper()
colorlist[phase] = "#"+colorvalues[np.mod(phasecount, mxx)]
legend_handles.append(mpatches.Patch(color=colorlist[phase],
label=phase))
phasecount = phasecount + 1
return legend_handles, colorlist
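# Editor's usage sketch (not part of pycalphad): attaching the generated handles to a
# matplotlib Axes legend; the phase names are illustrative.
def _phase_legend_sketch():
    import matplotlib.pyplot as plt
    handles, colors = phase_legend(['FCC_A1', 'BCC_A2', 'LIQUID'])
    fig, ax = plt.subplots()
    ax.legend(handles=handles, loc='best')
    return colors  # e.g. colors['FCC_A1'] == '#0000FF'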
| mit |
rahlk/RAAT | src/Planners/.old/thresholds.py | 2 | 1404 | """
CK Thresholds
"""
from __future__ import print_function, division
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import f_classif, f_regression
import sys
import texttable
sys.path.append('..')
from tools.oracle import *
from pdb import set_trace
def VARL(coef,inter,p0=0.05):
return (np.log(p0/(1-p0))-inter)/coef
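# Editor's note (not in the original): VARL inverts the fitted logistic model
# P(defect) = 1 / (1 + exp(-(coef*x + inter))) to find the metric value x at which
# the predicted probability equals p0, i.e. x = (log(p0/(1-p0)) - inter) / coef.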
def thresholds():
for name in ['ant', 'ivy', 'jedit', 'lucene', 'poi']:
print("##", name)
train, test = explore(dir='../Data/Jureczko/', name=name)
data_DF=csv2DF(train, toBin=True)
metrics=[str[1:] for str in data_DF[data_DF.columns[:-1]]]
ubr = LogisticRegression()
X = data_DF[data_DF.columns[:-1]].values
y = data_DF[data_DF.columns[-1]].values
ubr.fit(X,y)
inter, coef, pVal = ubr.intercept_[0], ubr.coef_[0], f_classif(X,y)[1]
table= texttable.Texttable()
table.set_cols_align(["l","l","l"])
table.set_cols_valign(["m","m","m"])
table.set_cols_dtype(['t', 't', 't'])
table_rows=[["Metric", "Threshold", "P-Value"]]
for i in xrange(len(metrics)):
if VARL(coef[i], inter, p0=0.05)>0 and pVal[i]<0.05:
thresh="%0.2f"%VARL(coef[i], inter, p0=0.1)
table_rows.append([metrics[i], thresh, "%0.3f"%pVal[i]])
table.add_rows(table_rows)
print(table.draw())
# === DEBUG ===
set_trace()
return None
if __name__=="__main__":
thresholds()
pass
| mit |
RobertABT/heightmap | build/matplotlib/examples/pylab_examples/barb_demo.py | 13 | 1712 | '''
Demonstration of wind barb plots
'''
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(-5, 5, 5)
X,Y = np.meshgrid(x, x)
U, V = 12*X, 12*Y
data = [(-1.5, .5, -6, -6),
(1, -1, -46, 46),
(-3, -1, 11, -11),
(1, 1.5, 80, 80),
(0.5, 0.25, 25, 15),
(-1.5, -0.5, -5, 40)]
data = np.array(data, dtype=[('x', np.float32), ('y', np.float32),
('u', np.float32), ('v', np.float32)])
#Default parameters, uniform grid
ax = plt.subplot(2,2,1)
ax.barbs(X, Y, U, V)
#Arbitrary set of vectors, make them longer and change the pivot point
#(point around which they're rotated) to be the middle
ax = plt.subplot(2,2,2)
ax.barbs(data['x'], data['y'], data['u'], data['v'], length=8, pivot='middle')
#Showing colormapping with uniform grid. Fill the circle for an empty barb,
#don't round the values, and change some of the size parameters
ax = plt.subplot(2,2,3)
ax.barbs(X, Y, U, V, np.sqrt(U*U + V*V), fill_empty=True, rounding=False,
sizes=dict(emptybarb=0.25, spacing=0.2, height=0.3))
#Change colors as well as the increments for parts of the barbs
ax = plt.subplot(2,2,4)
ax.barbs(data['x'], data['y'], data['u'], data['v'], flagcolor='r',
barbcolor=['b','g'], barb_increments=dict(half=10, full=20, flag=100),
flip_barb=True)
#Masked arrays are also supported
masked_u = np.ma.masked_array(data['u'])
masked_u[4] = 1000 #Bad value that should not be plotted when masked
masked_u[4] = np.ma.masked
#Identical plot to panel 2 in the first figure, but with the point at
#(0.5, 0.25) missing (masked)
fig2 = plt.figure()
ax = fig2.add_subplot(1, 1, 1)
ax.barbs(data['x'], data['y'], masked_u, data['v'], length=8, pivot='middle')
plt.show()
| mit |
antiface/mne-python | examples/stats/plot_cluster_stats_evoked.py | 18 | 2991 | """
=======================================================
Permutation F-test on sensor data with 1D cluster level
=======================================================
This example tests whether the evoked response is significantly different
between conditions. The multiple comparisons problem is addressed
with a cluster-level permutation test.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition1 = epochs1.get_data() # as 3D matrix
event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition2 = epochs2.get_data() # as 3D matrix
condition1 = condition1[:, 0, :] # take only one channel to get a 2D array
condition2 = condition2[:, 0, :] # take only one channel to get a 2D array
###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([condition1, condition2], n_permutations=1000,
threshold=threshold, tail=1, n_jobs=2)
###############################################################################
# Plot
times = epochs1.times
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
label="ERF Contrast (Event 1 - Event 2)")
plt.ylabel("MEG (T / m)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
c = c[0]
if cluster_p_values[i_c] <= 0.05:
h = plt.axvspan(times[c.start], times[c.stop - 1],
color='r', alpha=0.3)
else:
plt.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
alpha=0.3)
hf = plt.plot(times, T_obs, 'g')
plt.legend((h, ), ('cluster p-value < 0.05', ))
plt.xlabel("time (ms)")
plt.ylabel("f-values")
plt.show()
| bsd-3-clause |
dsm054/pandas | pandas/tests/indexes/multi/test_get_set.py | 1 | 15181 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas.compat import range
import pandas as pd
from pandas import CategoricalIndex, Index, MultiIndex
import pandas.util.testing as tm
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
def test_get_level_number_integer(idx):
idx.names = [1, 0]
assert idx._get_level_number(1) == 0
assert idx._get_level_number(0) == 1
pytest.raises(IndexError, idx._get_level_number, 2)
with pytest.raises(KeyError, match='Level fourth not found'):
idx._get_level_number('fourth')
def test_get_level_values(idx):
result = idx.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = idx.get_level_values('first')
expected = idx.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_value_duplicates():
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_level_values_all_na():
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_int_with_na():
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na():
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_set_name_methods(idx, index_names):
# so long as these are synonyms, we don't need to test set_names
assert idx.rename == idx.set_names
new_names = [name + "SUFFIX" for name in index_names]
ind = idx.set_names(new_names)
assert idx.names == index_names
assert ind.names == new_names
with pytest.raises(ValueError, match="^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = idx.set_names(new_names[0], level=0)
assert idx.names == index_names
assert ind.names == [new_names[0], index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], index_names[1]]
# set names for multiple levels
ind = idx.set_names(new_names, level=[0, 1])
assert idx.names == index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
def test_set_levels_labels_directly(idx):
# setting levels/labels directly raises AttributeError
levels = idx.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = idx.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
idx.levels = new_levels
with pytest.raises(AttributeError):
idx.labels = new_labels
def test_set_levels(idx):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = idx.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
# level changing [w/o mutation]
ind2 = idx.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(idx.levels, levels)
# level changing [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = idx.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(idx.levels, levels)
ind2 = idx.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(idx.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = idx.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(idx.levels, levels)
# level changing specific level [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(idx.levels, levels)
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(idx.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(idx.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = idx.copy()
for inplace in [True, False]:
with pytest.raises(ValueError, match="^On"):
idx.set_levels(['c'], level=0, inplace=inplace)
assert_matching(idx.levels, original_index.levels,
check_dtype=True)
with pytest.raises(ValueError, match="^On"):
idx.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(idx.labels, original_index.labels,
check_dtype=True)
with pytest.raises(TypeError, match="^Levels"):
idx.set_levels('c', level=0, inplace=inplace)
assert_matching(idx.levels, original_index.levels,
check_dtype=True)
with pytest.raises(TypeError, match="^Labels"):
idx.set_labels(1, level=0, inplace=inplace)
assert_matching(idx.labels, original_index.labels,
check_dtype=True)
def test_set_labels(idx):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = idx.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
# label changing [w/o mutation]
ind2 = idx.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(idx.labels, labels)
# label changing [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = idx.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(idx.labels, labels)
ind2 = idx.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(idx.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = idx.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(idx.labels, labels)
# label changing specific level [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(idx.labels, labels)
ind2 = idx.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(idx.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(idx.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(idx):
levels, labels = idx.levels, idx.labels
names = idx.names
with pytest.raises(ValueError, match='Length of levels'):
idx.set_levels([levels[0]])
with pytest.raises(ValueError, match='Length of labels'):
idx.set_labels([labels[0]])
with pytest.raises(ValueError, match='Length of names'):
idx.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with pytest.raises(TypeError, match='list of lists-like'):
idx.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with pytest.raises(TypeError, match='list of lists-like'):
idx.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with pytest.raises(TypeError, match='list-like'):
idx.set_names(names[0])
# should have equal lengths
with pytest.raises(TypeError, match='list of lists-like'):
idx.set_levels(levels[0], level=[0, 1])
with pytest.raises(TypeError, match='list-like'):
idx.set_levels(levels, level=0)
# should have equal lengths
with pytest.raises(TypeError, match='list of lists-like'):
idx.set_labels(labels[0], level=[0, 1])
with pytest.raises(TypeError, match='list-like'):
idx.set_labels(labels, level=0)
# should have equal lengths
with pytest.raises(ValueError, match='Length of names'):
idx.set_names(names[0], level=[0, 1])
with pytest.raises(TypeError, match='Names must be a'):
idx.set_names(names, level=0)
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('ordered', [True, False])
def test_set_levels_categorical(ordered):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_set_value_keeps_names():
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_set_levels_with_iterable():
# GH23273
sizes = [1, 2, 3]
colors = ['black'] * 3
index = pd.MultiIndex.from_arrays([sizes, colors], names=['size', 'color'])
result = index.set_levels(map(int, ['3', '2', '1']), level='size')
expected_sizes = [3, 2, 1]
expected = pd.MultiIndex.from_arrays([expected_sizes, colors],
names=['size', 'color'])
tm.assert_index_equal(result, expected)
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | dynamic_test/Newmark/gamma0.6/post.py | 9 | 2369 | #!/usr/bin/python
#Standard python libs
import sys
import os
# import datetime
import numpy as np
import h5py
import matplotlib.pyplot as plt
from math import *
#Libs related to scipy and matplotlib
from scipy import *
from scipy.fftpack import fft
from scipy.fftpack.helper import fftfreq
sys.path.append("./" )
# time_integrator_analysis was created by Jose.
# Jose's function was used directly here.
from time_integrator_analysis import findpeaks, measure_damping, hht_damping_and_shift
h5in_file_name=sys.argv[1]
gamma=sys.argv[2]
gamma=float(gamma)
h5file_in=h5py.File(h5in_file_name,"r")
# h5file_in=h5py.File("veri_newmark_dynamic.h5.feioutput","r")
disp=h5file_in['/Model/Nodes/Generalized_Displacements'][()]
time=h5file_in['/time'][()]
# The required displacement.
node_displ=disp[6][:]
last=len(node_displ)-1
len_time=len(time)
node_displ=np.delete(node_displ,[last],None)
dt=time[1]-time[0]
peak_indices=findpeaks(node_displ)
measured_period=time[peak_indices[2]]-time[peak_indices[1]]
alpha=0.0
N = node_displ.shape[0]
D = fft(node_displ[:])
f = fftfreq(N, dt)
xi, fs, Ys = measure_damping(f[0:N/2], abs(D[0:N/2]))
T_system=1.0
w = 2*pi/T_system
beta = 0.25*(0.5 + gamma)**2
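# Editor's note (not in the original): beta is tied to gamma by beta = (gamma + 1/2)**2 / 4,
# the same relation used in the HHT-alpha family (beta = (1 - alpha)**2 / 4 with
# gamma = 1/2 - alpha); for gamma = 1/2 it reduces to the average-acceleration values
# beta = 1/4, gamma = 1/2.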
wbar, xibar = hht_damping_and_shift(beta, gamma, alpha, w, dt)
T_theory = 2*pi/wbar
T_shift = (T_theory - T_system)/T_system*100
print "gamma=", gamma
print "xi=", xibar
print "T_shift=",T_shift
print "\n"
# My own method to calculate the theoretical wbar for Newmark method:
# I got the same result with the pre-existing one.
# dtw=dt*w
# numerator=dtw*sqrt(1+dtw**2*(beta-0.25*(gamma+0.5)**2))
# denominator=1+dtw**2*(beta-0.5*(gamma+0.5))
# Phi=arctan(numerator/denominator)
# wbarmy=Phi/dt
# wbarmy
# # time=np.transpose(time)
# print ("%16.8f \n" %len(node_displ))
# print ("%16.8f \n" %len_time)
# print ("%16.8f \n" %node_displ[0])
# print ("%16.8f \n" %node_displ[1])
# print ("%16.8f \n" %time[0])
# print ("%16.8f \n" %time[1])
# xi=0.1
# u_0=0.1
# w_n=2*pi
# w_D=w_n*np.sqrt(1-xi**2)
# u_exact=np.exp(-xi*w_n*time)*(u_0*np.cos(time*w_D)+(xi*w_n*u_0)/w_D*np.sin(w_D*time))
# # print("time")
# # , (comma) cannot be ignored.
# u_essi,=plt.plot(time, node_displ,'ro-')
# u_disp,=plt.plot(time, u_exact,'b^--')
# plt.xlabel('Time')
# plt.ylabel('Displacement')
# plt.legend([u_essi, u_disp], ["ESSI", "Exact"])
# plt.show()
| cc0-1.0 |
liyu1990/sklearn | examples/decomposition/plot_incremental_pca.py | 175 | 1974 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
colors = ['navy', 'turquoise', 'darkorange']
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
color=color, lw=2, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best", shadow=False, scatterpoints=1)
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
VipulSarin/citenet | scopus_module/scopus/scopus_reports.py | 1 | 5970 | from .scopus_api import ScopusAbstract, ScopusJournal
from .scopus_author import ScopusAuthor
def report(scopus_search, label):
'''Prints out an org-mode report for the results from the scopus_search with
the label.
'''
counts = {} # to count papers per author
journals = {} # to count publications per journal
author_count = [] # to count number of authors for each paper for a
# histogram
paper_cites = {}
Ncites = 0
document_types = {}
# import json
# with open('/Users/jkitchin/Dropbox/CMU/department/2015/'
# 'scopus/2013-journal-impact-factors.json') as f:
# impact_factors = json.loads(f.read())
N = 0 # to count number of publications
for eid in scopus_search.EIDS:
a = ScopusAbstract(eid)
# Get types of documents
if a.aggregationType in document_types:
document_types[a.aggregationType] += 1
else:
document_types[a.aggregationType] = 1
if a.aggregationType == 'Journal':
Ncites += int(a.citedby_count) # get total cites
N += 1 # count all the papers
# get count for journals
jkey = (a.publicationName, a.source_id, a.issn)
if jkey in journals:
journals[jkey] += 1
else:
journals[jkey] = 1
# get authors per paper
author_count += [len(a.authors)]
# now count papers per author
for author in a.authors:
key = (author.indexed_name, author.auid)
if key in counts:
counts[key] += 1
else:
counts[key] = 1
# counting cites per paper
key = (a.title, a.scopus_link)
if key in paper_cites:
paper_cites[key] += a.citedby_count
else:
paper_cites[key] = a.citedby_count
print('*** Report for {}\n'.format(label))
print('#+attr_latex: :placement [H] :center nil')
print('#+caption: Types of documents found for {}.'.format(label))
print('| Document type | count |\n|-')
for key in document_types:
print('| {0} | {1} |'.format(key, document_types[key]))
print('\n\n')
print('{0} articles ({2} citations) '
'found by {1} authors'.format(N,
len(counts),
Ncites))
from operator import itemgetter
# Author counts {(name, scopus-id): count}
view = [('[[scopusid:{0}][{1}]]'.format(k[1], k[0]), # org-mode link
v, # counts
k[1]) # scopus-id
for k, v in counts.items()]
view.sort(reverse=True, key=itemgetter(1))
print('\n#+attr_latex: :placement [H] :center nil')
print('#+caption: Author publication counts for {0}.'.format(label))
print('| name | count | categories |')
print('|-')
for name, count, scopus_id in view[0:20]:
cats = ', '.join(['{0} ({1})'.format(cat[0], cat[1])
for cat in ScopusAuthor(scopus_id).categories[0:3]])
print('| {0} | {1} | {2} |'.format(name,
count,
cats))
# journal view
s = '[[http://www.scopus.com/source/sourceInfo.url?sourceId={0}][{1}]]'
jview = [(s.format(k[1], k[0][0:50]), # url
k[1], # source_id
k[2], # issn
v) # count
for k, v in journals.items()]
jview.sort(reverse=True, key=itemgetter(3))
print('\n\n')
print('#+attr_latex: :placement [H] :center nil')
print('#+caption: Journal publication counts for {0}.'.format(label))
print('| Journal | count | IPP |')
print('|-')
for journal, sid, issn, count in jview[0:12]:
JOURNAL = ScopusJournal(issn)
IPP = JOURNAL.IPP or 0
print('| {0} | {1} | {2} |'.format(journal,
count,
IPP))
# view of journals sorted by IPP
JVIEW = []
for journal, sid, issn, count in jview:
JOURNAL = ScopusJournal(issn)
IPP = JOURNAL.IPP or 0
JVIEW.append([journal, count, IPP])
JVIEW.sort(reverse=True, key=itemgetter(2))
print('\n\n')
print('#+attr_latex: :placement [H] :center nil')
print('#+caption: Journal publication counts'
' for {0} sorted by IPP.'.format(label))
print('| Journal | count | IPP |')
print('|-')
for journal, count, IPP in JVIEW[0:12]:
print('|{0}|{1}|{2}|'.format(journal, count, IPP))
# top cited papers
pview = [('[[{0}][{1}]]'.format(k[1], k[0][0:60]),
int(v))
for k, v in paper_cites.items()]
pview.sort(reverse=True, key=itemgetter(1))
# Compute department j-index
hindex = 0
for i, entry in enumerate(pview):
# entry is (org-mode link, citation count)
u, count = entry
if count > i + 1:
continue
else:
hindex = i + 1
break
print('\n\n#+attr_latex: :placement [H] :center nil')
print('#+caption: Top cited publication'
' counts for {0}. j-index = {1}.'.format(label, hindex))
print('| title | cite count |\n|-')
for title, count in pview[0:10]:
print('| {0} | {1} |'.format(title, count))
import matplotlib.pyplot as plt
plt.figure()
plt.hist(author_count, 20)
plt.xlabel('# authors')
plt.ylabel('frequency')
plt.savefig('{0}-nauthors-per-publication.png'.format(label))
print('\n\n#+caption: Number of authors '
'on each publication for {}.'.format(label))
print('[[./{0}-nauthors-per-publication.png]]'.format(label))
print('''**** Bibliography :noexport:
:PROPERTIES:
:VISIBILITY: folded
:END:''')
print(scopus_search.org_summary)
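# Editor's usage sketch (not part of this module): generating a report from a Scopus
# author search. The ScopusSearch import path and the query string are placeholder
# assumptions; any object exposing an ``EIDS`` list of Scopus EIDs works with report().
def _report_sketch():
    from scopus.scopus_search import ScopusSearch  # assumed import path
    s = ScopusSearch('AU-ID(12345678900)')  # placeholder author ID
    report(s, 'example author')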
| gpl-3.0 |
mikemccann/stoqs | stoqs/views/permalinks.py | 4 | 3457 | __author__ = 'Chander Ganesan'
__copyright__ = '2013'
__license__ = 'GPL v3'
__contact__ = 'chander at otg-nc.com'
__doc__ = '''
A set of views designed to generate a permalink based on a set of STOQS query
parameters.
Note that work should be done at some point to prevent this view from being
misused by validating the parameters/values passed in; this was not done here
since I'm not 100% sure of all the use cases for STOQS. However, the danger
right now is that anyone could use this view to store arbitrary JSON data
in the database and abuse the services of the provider hosting STOQS (or
even attempt JavaScript injection - though such things won't impact STOQS
web services, which only load the JSON, not run it). Enabling CSRF protection
and account login would greatly mitigate the danger here.
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
from django.core.exceptions import ObjectDoesNotExist, SuspiciousOperation
from django.http import Http404
from django.http import HttpResponse
from stoqs.views import BaseOutputer
from stoqs import models
##import matplotlib.pyplot as plt
import logging
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
import simplejson as json
from django.core.urlresolvers import reverse
import threading
_thread_local_vars = threading.local()
logger=logging.getLogger(__name__)
@csrf_exempt
def generate_permalink(request):
data=request.POST.get('parameters')
if data:
try:
# Just make sure it is valid json before storing it.
parameters=json.loads(data)
m=models.PermaLink(parameters=data)
m.save()
logger.debug('Saved link with id of %s', m.pk)
url="%s?permalink_id=%s" % (reverse('stoqs-query-ui',
kwargs={'dbAlias' :
request.META['dbAlias']}),
m.pk)
# url=reverse('redirect_permalink',
# kwargs={'dbAlias' : (request.META['dbAlias']),
# 'id': m.pk})
except Exception, e:
logger.exception('Doh!')
logger.debug('Attempt to create permalink without valid data')
raise SuspiciousOperation('Attempt to create permalink without any data, or with invalid data')
else:
# In the case where they request a permalink, but without selecting
# any parameters, we'll just return to them the current URL for the
# tool, so we don't store unnecessary permalinks
url=reverse('stoqs-query-ui',
kwargs={'dbAlias' : request.META['dbAlias']})
return HttpResponse(request.build_absolute_uri(url))
def load_permalink(request, id):
logger.debug('Got request for link with ID of %s', id)
try:
m=models.PermaLink.objects.get(pk=id)
m.usage_count = m.usage_count + 1
m.save()
# return the JSON for the permalink data
response=HttpResponse(m.parameters,
content_type="application/json")
return response
except ObjectDoesNotExist, e:
logger.debug('Attempted to get a permalink that does not exist: %s', id)
raise Http404
| gpl-3.0 |
luispedro/BuildingMachineLearningSystemsWithPython | ch06/04_sent.py | 22 | 10125 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script tries to tweak hyperparameters to improve P/R AUC
#
import time
start_time = time.time()
import re
import nltk
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from utils import log_false_positives
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import f1_score
from sklearn.base import BaseEstimator
from sklearn.naive_bayes import MultinomialNB
from utils import load_sent_word_net
sent_word_net = load_sent_word_net()
phase = "04"
import json
poscache_filename = "poscache.json"
try:
poscache = json.load(open(poscache_filename, "r"))
except IOError:
poscache = {}
class LinguisticVectorizer(BaseEstimator):
def get_feature_names(self):
return np.array(['sent_neut', 'sent_pos', 'sent_neg',
'nouns', 'adjectives', 'verbs', 'adverbs',
'allcaps', 'exclamation', 'question'])
def fit(self, documents, y=None):
return self
def _get_sentiments(self, d):
# http://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
sent = tuple(nltk.word_tokenize(d))
if poscache is not None:
if d in poscache:
tagged = poscache[d]
else:
poscache[d] = tagged = nltk.pos_tag(sent)
else:
tagged = nltk.pos_tag(sent)
pos_vals = []
neg_vals = []
nouns = 0.
adjectives = 0.
verbs = 0.
adverbs = 0.
for w, t in tagged:
p, n = 0, 0
sent_pos_type = None
if t.startswith("NN"):
sent_pos_type = "n"
nouns += 1
elif t.startswith("JJ"):
sent_pos_type = "a"
adjectives += 1
elif t.startswith("VB"):
sent_pos_type = "v"
verbs += 1
elif t.startswith("RB"):
sent_pos_type = "r"
adverbs += 1
if sent_pos_type is not None:
sent_word = "%s/%s" % (sent_pos_type, w)
if sent_word in sent_word_net:
p, n = sent_word_net[sent_word]
pos_vals.append(p)
neg_vals.append(n)
l = len(sent)
avg_pos_val = np.mean(pos_vals)
avg_neg_val = np.mean(neg_vals)
return [1 - avg_pos_val - avg_neg_val, avg_pos_val, avg_neg_val,
nouns / l, adjectives / l, verbs / l, adverbs / l]
def transform(self, documents):
obj_val, pos_val, neg_val, nouns, adjectives, verbs, adverbs = np.array(
[self._get_sentiments(d) for d in documents]).T
allcaps = []
exclamation = []
question = []
for d in documents:
allcaps.append(
np.sum([t.isupper() for t in d.split() if len(t) > 2]))
exclamation.append(d.count("!"))
question.append(d.count("?"))
result = np.array(
[obj_val, pos_val, neg_val, nouns, adjectives, verbs, adverbs, allcaps,
exclamation, question]).T
return result
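# Editor's note (not in the original): sent_word_net is assumed to map keys of the form
# "<pos>/<word>" (e.g. "a/good") to (positive_score, negative_score) tuples, which is
# the format the lookup in _get_sentiments above relies on.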
emo_repl = {
# positive emoticons
"<3": " good ",
":d": " good ", # :D in lower case
":dd": " good ", # :DD in lower case
"8)": " good ",
":-)": " good ",
":)": " good ",
";)": " good ",
"(-:": " good ",
"(:": " good ",
# negative emoticons:
":/": " bad ",
":>": " sad ",
":')": " sad ",
":-(": " bad ",
":(": " bad ",
":S": " bad ",
":-S": " bad ",
}
emo_repl_order = [k for (k_len, k) in reversed(
sorted([(len(k), k) for k in list(emo_repl.keys())]))]
re_repl = {
r"\br\b": "are",
r"\bu\b": "you",
r"\bhaha\b": "ha",
r"\bhahaha\b": "ha",
r"\bdon't\b": "do not",
r"\bdoesn't\b": "does not",
r"\bdidn't\b": "did not",
r"\bhasn't\b": "has not",
r"\bhaven't\b": "have not",
r"\bhadn't\b": "had not",
r"\bwon't\b": "will not",
r"\bwouldn't\b": "would not",
r"\bcan't\b": "can not",
r"\bcannot\b": "can not",
}
def create_union_model(params=None):
def preprocessor(tweet):
tweet = tweet.lower()
for k in emo_repl_order:
tweet = tweet.replace(k, emo_repl[k])
for r, repl in re_repl.items():
tweet = re.sub(r, repl, tweet)
return tweet.replace("-", " ").replace("_", " ")
tfidf_ngrams = TfidfVectorizer(preprocessor=preprocessor,
analyzer="word")
ling_stats = LinguisticVectorizer()
all_features = FeatureUnion(
[('ling', ling_stats), ('tfidf', tfidf_ngrams)])
#all_features = FeatureUnion([('tfidf', tfidf_ngrams)])
#all_features = FeatureUnion([('ling', ling_stats)])
clf = MultinomialNB()
pipeline = Pipeline([('all', all_features), ('clf', clf)])
if params:
pipeline.set_params(**params)
return pipeline
def __grid_search_model(clf_factory, X, Y):
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, random_state=0)
param_grid = dict(vect__ngram_range=[(1, 1), (1, 2), (1, 3)],
vect__min_df=[1, 2],
vect__smooth_idf=[False, True],
vect__use_idf=[False, True],
vect__sublinear_tf=[False, True],
vect__binary=[False, True],
clf__alpha=[0, 0.01, 0.05, 0.1, 0.5, 1],
)
grid_search = GridSearchCV(clf_factory(),
param_grid=param_grid,
cv=cv,
score_func=f1_score,
verbose=10)
grid_search.fit(X, Y)
clf = grid_search.best_estimator_
print(clf)
return clf
def train_model(clf, X, Y, name="NB ngram", plot=False):
# create it again for plotting
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = []
precisions, recalls, thresholds = [], [], []
clfs = [] # just to later get the median
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, 1])
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
if plot:
scores_to_sort = pr_scores
median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]
plot_pr(pr_scores[median], name, phase, precisions[median],
recalls[median], label=name)
log_false_positives(clfs[median], X_test, y_test, name)
summary = (np.mean(scores), np.std(scores),
np.mean(pr_scores), np.std(pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
Y_hat = clf.predict(X)
wrong_idx = Y_hat != Y
X_wrong = X[wrong_idx]
Y_wrong = Y[wrong_idx]
Y_hat_wrong = Y_hat[wrong_idx]
for idx in range(len(X_wrong)):
print("clf.predict('%s')=%i instead of %i" %
(X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx]))
def get_best_model():
best_params = dict(all__tfidf__ngram_range=(1, 2),
all__tfidf__min_df=1,
all__tfidf__stop_words=None,
all__tfidf__smooth_idf=False,
all__tfidf__use_idf=False,
all__tfidf__sublinear_tf=True,
all__tfidf__binary=False,
clf__alpha=0.01,
)
best_clf = create_union_model(best_params)
return best_clf
if __name__ == "__main__":
X_orig, Y_orig = load_sanders_data()
#from sklearn.utils import shuffle
# print "shuffle, sample"
#X_orig, Y_orig = shuffle(X_orig, Y_orig)
#X_orig = X_orig[:100,]
#Y_orig = Y_orig[:100,]
classes = np.unique(Y_orig)
for c in classes:
print("#%s: %i" % (c, sum(Y_orig == c)))
print("== Pos vs. neg ==")
pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
X = X_orig[pos_neg]
Y = Y_orig[pos_neg]
Y = tweak_labels(Y, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print("== Pos/neg vs. irrelevant/neutral ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive", "negative"])
# best_clf = grid_search_model(create_union_model, X, Y, name="sent vs
# rest", plot=True)
train_model(get_best_model(), X, Y, name="pos+neg vs rest", plot=True)
print("== Pos vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs rest",
plot=True)
print("== Neg vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["negative"])
train_model(get_best_model(), X, Y, name="neg vs rest",
plot=True)
print("time spent:", time.time() - start_time)
json.dump(poscache, open(poscache_filename, "w"))
| mit |
mlyundin/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
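# Editor's usage sketch (not part of scikit-learn): two identical sets containing a
# single bicluster give a perfect consensus score of 1.0.
def _consensus_score_sketch():
    import numpy as np
    rows = np.array([[True, True, False, False]])
    cols = np.array([[True, False, True, False]])
    return consensus_score((rows, cols), (rows, cols))  # -> 1.0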
| bsd-3-clause |
ahnitz/mpld3 | mpld3/plugins.py | 1 | 23266 | """
Plugins to add behavior to mpld3 charts
=======================================
Plugins are means of adding additional javascript features to D3-rendered
matplotlib plots. A number of plugins are defined here; it is also possible
to create nearly any imaginable behavior by defining your own custom plugin.
"""
__all__ = ['connect', 'clear', 'get_plugins', 'PluginBase',
'Reset', 'Zoom', 'BoxZoom',
'PointLabelTooltip', 'PointHTMLTooltip', 'LineLabelTooltip',
'MousePosition']
import collections
import json
import uuid
import matplotlib
from .utils import get_id
def get_plugins(fig):
"""Get the list of plugins in the figure"""
connect(fig)
return fig.mpld3_plugins
def connect(fig, *plugins):
"""Connect one or more plugins to a figure
Parameters
----------
fig : matplotlib Figure instance
The figure to which the plugins will be connected
*plugins :
Additional arguments should be plugins which will be connected
to the figure.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10), '-k')
>>> plugins.connect(fig, plugins.LineLabelTooltip(lines[0]))
"""
if not isinstance(fig, matplotlib.figure.Figure):
raise ValueError("plugins.connect: first argument must be a figure")
if not hasattr(fig, 'mpld3_plugins'):
fig.mpld3_plugins = DEFAULT_PLUGINS[:]
for plugin in plugins:
fig.mpld3_plugins.append(plugin)
def clear(fig):
"""Clear all plugins from the figure, including defaults"""
fig.mpld3_plugins = []
class PluginBase(object):
def get_dict(self):
return self.dict_
def javascript(self):
if hasattr(self, "JAVASCRIPT"):
if hasattr(self, "js_args_"):
return self.JAVASCRIPT.render(self.js_args_)
else:
return self.JAVASCRIPT
else:
return ""
def css(self):
if hasattr(self, "css_"):
return self.css_
else:
return ""
class Reset(PluginBase):
"""A Plugin to add a reset button"""
dict_ = {"type": "reset"}
class MousePosition(PluginBase):
"""A Plugin to display coordinates for the current mouse position
Example
-------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> plugins.connect(fig, plugins.MousePosition())
>>> fig_to_html(fig)
"""
def __init__(self, fontsize=12, fmt=".3g"):
self.dict_ = {"type": "mouseposition",
"fontsize": fontsize,
"fmt": fmt}
class Zoom(PluginBase):
"""A Plugin to add zoom behavior to the plot
Parameters
----------
button : boolean, optional
if True (default), then add a button to enable/disable zoom behavior
enabled : boolean, optional
specify whether the zoom should be enabled by default. By default,
zoom is enabled if button == False, and disabled if button == True.
Notes
-----
Even if ``enabled`` is specified, other plugins may modify this state.
"""
def __init__(self, button=True, enabled=None):
if enabled is None:
enabled = not button
self.dict_ = {"type": "zoom",
"button": button,
"enabled": enabled}
class BoxZoom(PluginBase):
"""A Plugin to add box-zoom behavior to the plot
Parameters
----------
button : boolean, optional
if True (default), then add a button to enable/disable zoom behavior
enabled : boolean, optional
specify whether the zoom should be enabled by default. By default,
zoom is enabled if button == False, and disabled if button == True.
Notes
-----
Even if ``enabled`` is specified, other plugins may modify this state.
"""
def __init__(self, button=True, enabled=None):
if enabled is None:
enabled = not button
self.dict_ = {"type": "boxzoom",
"button": button,
"enabled": enabled}
class PointLabelTooltip(PluginBase):
"""A Plugin to enable a tooltip: text which hovers over points.
Parameters
----------
points : matplotlib Collection or Line2D object
The figure element to apply the tooltip to
labels : array or None
If supplied, specify the labels for each point in points. If not
supplied, the (x, y) values will be used.
hoffset, voffset : integer
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> plugins.connect(fig, PointLabelTooltip(points[0]))
>>> fig_to_html(fig)
"""
def __init__(self, points, labels=None,
hoffset=0, voffset=10, location="mouse"):
if location not in ["bottom left", "top left", "bottom right",
"top right", "mouse"]:
raise ValueError("invalid location: {0}".format(location))
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "tooltip",
"id": get_id(points, suffix),
"labels": labels,
"hoffset": hoffset,
"voffset": voffset,
"location": location}
class LineLabelTooltip(PluginBase):
"""A Plugin to enable a tooltip: text which hovers over a line.
Parameters
----------
line : matplotlib Line2D object
The figure element to apply the tooltip to
label : string
If supplied, specify the labels for each point in points. If not
supplied, the (x, y) values will be used.
hoffset, voffset : integer
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10), 'o')
>>> plugins.connect(fig, LineLabelTooltip(lines[0]))
>>> fig_to_html(fig)
"""
def __init__(self, points, label=None,
hoffset=0, voffset=10, location="mouse"):
if location not in ["bottom left", "top left", "bottom right",
"top right", "mouse"]:
raise ValueError("invalid location: {0}".format(location))
self.dict_ = {"type": "tooltip",
"id": get_id(points),
"labels": label if label is None else [label],
"hoffset": hoffset,
"voffset": voffset,
"location": location}
class LinkedBrush(PluginBase):
"""A Plugin to enable linked brushing between plots
Parameters
----------
points : matplotlib Collection or Line2D object
A representative of the scatter plot elements to brush.
button : boolean, optional
if True (default), then add a button to enable/disable zoom behavior
enabled : boolean, optional
specify whether the zoom should be enabled by default. default=True.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from mpld3 import fig_to_html, plugins
>>> X = np.random.random((3, 100))
>>> fig, ax = plt.subplots(3, 3)
>>> for i in range(2):
... for j in range(2):
... points = ax[i, j].scatter(X[i], X[j])
>>> plugins.connect(fig, LinkedBrush(points))
>>> fig_to_html(fig)
Notes
-----
Notice that in the above example, only one of the four sets of points is
passed to the plugin. This is all that is needed: for the sake of efficient
data storage, mpld3 keeps track of which plot objects draw from the same
data.
Also note that for the linked brushing to work correctly, the data must
not contain any NaNs. The presence of NaNs makes the different data views
have different sizes, so that mpld3 is unable to link the related points.
"""
def __init__(self, points, button=True, enabled=True):
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "linkedbrush",
"button": button,
"enabled": enabled,
"id": get_id(points, suffix)}
class PointHTMLTooltip(PluginBase):
"""A Plugin to enable an HTML tooltip:
formated text which hovers over points.
Parameters
----------
points : matplotlib Collection or Line2D object
The figure element to apply the tooltip to
labels : list
The labels for each point in points, as strings of unescaped HTML.
hoffset, voffset : integer, optional
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
css : str, optional
css to be included, for styling the label html if desired
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> labels = ['<h1>{title}</h1>'.format(title=i) for i in range(10)]
>>> plugins.connect(fig, PointHTMLTooltip(points[0], labels))
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("htmltooltip", HtmlTooltipPlugin);
HtmlTooltipPlugin.prototype = Object.create(mpld3.Plugin.prototype);
HtmlTooltipPlugin.prototype.constructor = HtmlTooltipPlugin;
HtmlTooltipPlugin.prototype.requiredProps = ["id"];
HtmlTooltipPlugin.prototype.defaultProps = {labels:null,
hoffset:0,
voffset:10};
function HtmlTooltipPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
HtmlTooltipPlugin.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
var labels = this.props.labels;
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.elements()
.on("mouseover", function(d, i){
tooltip.html(labels[i])
.style("visibility", "visible");})
.on("mousemove", function(d, i){
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");});
};
"""
def __init__(self, points, labels=None,
hoffset=0, voffset=10, css=None):
self.points = points
self.labels = labels
self.voffset = voffset
self.hoffset = hoffset
self.css_ = css or ""
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "htmltooltip",
"id": get_id(points, suffix),
"labels": labels,
"hoffset": hoffset,
"voffset": voffset}
class LineHTMLTooltip(PluginBase):
"""A Plugin to enable an HTML tooltip:
    formatted text which hovers over a line.
Parameters
----------
    line : matplotlib Line2D object
        The line element to apply the tooltip to
    label : string
        The label for the line, as a string of unescaped HTML.
hoffset, voffset : integer, optional
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
css : str, optional
css to be included, for styling the label html if desired
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10))
>>> label = '<h1>line {title}</h1>'.format(title='A')
>>> plugins.connect(fig, LineHTMLTooltip(lines[0], label))
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("linehtmltooltip", LineHTMLTooltip);
LineHTMLTooltip.prototype = Object.create(mpld3.Plugin.prototype);
LineHTMLTooltip.prototype.constructor = LineHTMLTooltip;
LineHTMLTooltip.prototype.requiredProps = ["id"];
LineHTMLTooltip.prototype.defaultProps = {label:null,
hoffset:0,
voffset:10};
function LineHTMLTooltip(fig, props){
mpld3.Plugin.call(this, fig, props);
};
LineHTMLTooltip.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id, this.fig);
var label = this.props.label
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.elements()
.on("mouseover", function(d, i){
tooltip.html(label)
.style("visibility", "visible");
})
.on("mousemove", function(d, i){
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");})
};
"""
def __init__(self, line, label=None,
hoffset=0, voffset=10,
css=None):
self.line = line
self.label = label
self.voffset = voffset
self.hoffset = hoffset
self.css_ = css or ""
self.dict_ = {"type": "linehtmltooltip",
"id": get_id(line),
"label": label,
"hoffset": hoffset,
"voffset": voffset}
class InteractiveLegendPlugin(PluginBase):
"""A plugin for an interactive legends.
Inspired by http://bl.ocks.org/simzou/6439398
Parameters
----------
plot_elements : iterable of matplotlib elements
        the elements to associate with a given legend item
labels : iterable of strings
The labels for each legend element
ax : matplotlib axes instance, optional
the ax to which the legend belongs. Default is the first
axes. The legend will be plotted to the right of the specified
axes
alpha_sel : float, optional
the alpha value to apply to the plot_element(s) associated
with the legend item when the legend item is selected.
Default is 1.0
alpha_unsel : float, optional
the alpha value to apply to the plot_element(s) associated
with the legend item when the legend item is unselected.
Default is 0.2
Examples
--------
>>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> from mpld3 import fig_to_html, plugins
>>> N_paths = 5
>>> N_steps = 100
>>> x = np.linspace(0, 10, 100)
>>> y = 0.1 * (np.random.random((N_paths, N_steps)) - 0.5)
>>> y = y.cumsum(1)
>>> fig, ax = plt.subplots()
>>> labels = ["a", "b", "c", "d", "e"]
>>> line_collections = ax.plot(x, y.T, lw=4, alpha=0.1)
>>> interactive_legend = plugins.InteractiveLegendPlugin(line_collections,
... labels,
... alpha_unsel=0.1)
>>> plugins.connect(fig, interactive_legend)
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("interactive_legend", InteractiveLegend);
InteractiveLegend.prototype = Object.create(mpld3.Plugin.prototype);
InteractiveLegend.prototype.constructor = InteractiveLegend;
InteractiveLegend.prototype.requiredProps = ["element_ids", "labels"];
InteractiveLegend.prototype.defaultProps = {"ax":null,
"alpha_sel":1.0,
"alpha_unsel":0}
function InteractiveLegend(fig, props){
mpld3.Plugin.call(this, fig, props);
};
InteractiveLegend.prototype.draw = function(){
var alpha_sel = this.props.alpha_sel;
var alpha_unsel = this.props.alpha_unsel;
var legendItems = new Array();
for(var i=0; i<this.props.labels.length; i++){
var obj = {};
obj.label = this.props.labels[i];
var element_id = this.props.element_ids[i];
mpld3_elements = [];
for(var j=0; j<element_id.length; j++){
var mpld3_element = mpld3.get_element(element_id[j], this.fig);
// mpld3_element might be null in case of Line2D instances
// for we pass the id for both the line and the markers. Either
// one might not exist on the D3 side
if(mpld3_element){
mpld3_elements.push(mpld3_element);
}
}
obj.mpld3_elements = mpld3_elements;
            obj.visible = false; // should eventually be settable from the python side
legendItems.push(obj);
}
// determine the axes with which this legend is associated
var ax = this.props.ax
if(!ax){
ax = this.fig.axes[0];
} else{
ax = mpld3.get_element(ax, this.fig);
}
// add a legend group to the canvas of the figure
var legend = this.fig.canvas.append("svg:g")
.attr("class", "legend");
// add the rectangles
legend.selectAll("rect")
.data(legendItems)
.enter().append("rect")
.attr("height",10)
.attr("width", 25)
.attr("x",ax.width+10+ax.position[0])
.attr("y",function(d,i) {
return ax.position[1]+ i * 25 - 10;})
.attr("stroke", get_color)
.attr("class", "legend-box")
.style("fill", function(d, i) {
return d.visible ? get_color(d) : "white";})
.on("click", click);
// add the labels
legend.selectAll("text")
.data(legendItems)
.enter().append("text")
.attr("x", function (d) {
return ax.width+10+ax.position[0] + 40;})
.attr("y", function(d,i) {
return ax.position[1]+ i * 25;})
.text(function(d) { return d.label });
// specify the action on click
function click(d,i){
d.visible = !d.visible;
d3.select(this)
.style("fill",function(d, i) {
return d.visible ? get_color(d) : "white";
})
for(var i=0; i<d.mpld3_elements.length; i++){
var type = d.mpld3_elements[i].constructor.name;
if(type =="mpld3_Line"){
d3.select(d.mpld3_elements[i].path[0][0])
.style("stroke-opacity",
d.visible ? alpha_sel : alpha_unsel);
} else if((type=="mpld3_PathCollection")||
(type=="mpld3_Markers")){
d3.selectAll(d.mpld3_elements[i].pathsobj[0])
.style("stroke-opacity",
d.visible ? alpha_sel : alpha_unsel)
.style("fill-opacity",
d.visible ? alpha_sel : alpha_unsel);
} else{
console.log(type + " not yet supported");
}
}
};
// helper function for determining the color of the rectangles
function get_color(d){
var type = d.mpld3_elements[0].constructor.name;
var color = "black";
if(type =="mpld3_Line"){
color = d.mpld3_elements[0].props.edgecolor;
} else if((type=="mpld3_PathCollection")||
(type=="mpld3_Markers")){
color = d.mpld3_elements[0].props.facecolors[0];
} else{
console.log(type + " not yet supported");
}
return color;
};
};
"""
css_ = """
.legend-box {
cursor: pointer;
}
"""
def __init__(self, plot_elements, labels, ax=None,
alpha_sel=1, alpha_unsel=0.2):
self.ax = ax
if ax:
ax = get_id(ax)
mpld3_element_ids = self._determine_mpld3ids(plot_elements)
self.mpld3_element_ids = mpld3_element_ids
self.dict_ = {"type": "interactive_legend",
"element_ids": mpld3_element_ids,
"labels": labels,
"ax": ax,
"alpha_sel": alpha_sel,
"alpha_unsel": alpha_unsel}
def _determine_mpld3ids(self, plot_elements):
"""
Helper function to get the mpld3_id for each
of the specified elements.
"""
mpld3_element_ids = []
# There are two things being done here. First,
# we make sure that we have a list of lists, where
# each inner list is associated with a single legend
# item. Second, in case of Line2D object we pass
# the id for both the marker and the line.
# on the javascript side we filter out the nulls in
# case either the line or the marker has no equivalent
# D3 representation.
for entry in plot_elements:
ids = []
if isinstance(entry, collections.Iterable):
for element in entry:
mpld3_id = get_id(element)
ids.append(mpld3_id)
if isinstance(element, matplotlib.lines.Line2D):
mpld3_id = get_id(element, 'pts')
ids.append(mpld3_id)
else:
ids.append(get_id(entry))
if isinstance(entry, matplotlib.lines.Line2D):
mpld3_id = get_id(entry, 'pts')
ids.append(mpld3_id)
mpld3_element_ids.append(ids)
return mpld3_element_ids
DEFAULT_PLUGINS = [Reset(), Zoom(), BoxZoom()]
| bsd-3-clause |
gotomypc/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
xavierwu/scikit-learn | sklearn/metrics/__init__.py | 214 | 3440 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause |
msultan/msmbuilder | msmbuilder/tests/test_agglomerative.py | 6 | 4311 | import numpy as np
from mdtraj.testing import eq
from sklearn.base import clone
from sklearn.metrics import adjusted_rand_score
from msmbuilder.cluster import LandmarkAgglomerative
from msmbuilder.example_datasets import AlanineDipeptide
random = np.random.RandomState(2)
def test_1():
x = [random.randn(10, 2), random.randn(10, 2)]
n_clusters = 2
model1 = LandmarkAgglomerative(n_clusters=n_clusters)
model2 = LandmarkAgglomerative(n_clusters=n_clusters,
n_landmarks=sum(len(s) for s in x))
labels0 = clone(model1).fit(x).predict(x)
labels1 = model1.fit_predict(x)
labels2 = model2.fit_predict(x)
assert len(labels0) == 2
assert len(labels1) == 2
assert len(labels2) == 2
eq(labels0[0], labels1[0])
eq(labels0[1], labels1[1])
eq(labels0[0], labels2[0])
eq(labels0[1], labels2[1])
assert len(np.unique(np.concatenate(labels0))) == n_clusters
def test_2():
# this should be a really easy clustering problem
x = [random.randn(20, 2) + 10, random.randn(20, 2)]
n_clusters = 2
model1 = LandmarkAgglomerative(n_clusters=n_clusters)
model2 = LandmarkAgglomerative(n_clusters=n_clusters,
landmark_strategy='random',
random_state=random, n_landmarks=20)
labels1 = model1.fit_predict(x)
labels2 = model2.fit_predict(x)
assert adjusted_rand_score(np.concatenate(labels1),
np.concatenate(labels2)) == 1.0
def test_callable_metric():
def my_euc(target, ref, i):
return np.sqrt(np.sum((target - ref[i]) ** 2, axis=1))
model1 = LandmarkAgglomerative(n_clusters=10, n_landmarks=20,
metric='euclidean')
model2 = LandmarkAgglomerative(n_clusters=10, n_landmarks=20, metric=my_euc)
data = np.random.RandomState(0).randn(100, 2)
eq(model1.fit_predict([data])[0], model2.fit_predict([data])[0])
def test_1_ward():
x = [random.randn(10, 2), random.randn(10, 2)]
n_clusters = 2
model1 = LandmarkAgglomerative(n_clusters=n_clusters,
linkage='ward')
model2 = LandmarkAgglomerative(n_clusters=n_clusters,
linkage='ward',
n_landmarks=sum(len(s) for s in x))
labels0 = clone(model1).fit(x).predict(x)
labels1 = model1.fit_predict(x)
labels2 = model2.fit_predict(x)
assert len(labels0) == 2
assert len(labels1) == 2
assert len(labels2) == 2
eq(labels0[0], labels1[0])
eq(labels0[1], labels1[1])
eq(labels0[0], labels2[0])
eq(labels0[1], labels2[1])
assert len(np.unique(np.concatenate(labels0))) == n_clusters
def test_2_ward():
# this should be a really easy clustering problem
x = [random.randn(20, 2) + 10, random.randn(20, 2)]
n_clusters = 2
model1 = LandmarkAgglomerative(n_clusters=n_clusters,
linkage='ward')
model2 = LandmarkAgglomerative(n_clusters=n_clusters,
linkage='ward',
landmark_strategy='random',
random_state=random, n_landmarks=20)
labels1 = model1.fit_predict(x)
labels2 = model2.fit_predict(x)
assert adjusted_rand_score(np.concatenate(labels1),
np.concatenate(labels2)) == 1.0
def test_alanine_dipeptide():
# test for rmsd metric compatibility with ward clustering
# keep n_landmarks small or this will get really slow
trajectories = AlanineDipeptide().get_cached().trajectories
n_clusters = 4
model = LandmarkAgglomerative(n_clusters=n_clusters, n_landmarks=20,
linkage='ward', metric='rmsd')
labels = model.fit_predict(trajectories[0][0:100])
assert len(np.unique(np.concatenate(labels))) <= n_clusters
def test_cluster_centers():
x = [random.randn(20, 2) + 10, random.randn(20, 2)]
n_clusters = np.random.randint(2, 7)
model = LandmarkAgglomerative(n_clusters=n_clusters,
linkage='ward')
labels = model.fit_predict(x)
print(model.cluster_centers_)
assert model.cluster_centers_.shape == (n_clusters, 2)
| lgpl-2.1 |
ShaningSoul/pi-qmc | python/piworld/SKOmegaView.py | 2 | 1940 | #-*- coding: utf-8 -*-
import math,scipy,numpy as np,tables,matplotlib.pyplot as plt
from scipy.fftpack import *
from scipy.optimize import leastsq
import tables, pitools
from pitools import Unit
from EstimatorView import *
class SKOmegaView(EstimatorView):
def __init__(self, estimatorNode, data, parent=None):
EstimatorView.__init__(self,parent)
#Read the data and do some setup.
self.polk = estimatorNode.node.read()
self.boxl = data.superCell
#Divide correlation by total volume to get polarizability (definition).
self.polk /= self.boxl.prod()
# Make grid of k values for plotting and analysis.
    # Uses some python magic, especially map and ogrid.
nx = self.polk.shape[2:-1]
def myslice(n): return slice(-n/2,n/2)
kval = (2*math.pi/self.boxl)*np.array(np.ogrid[map(myslice,nx)])
self.kgrid = np.sqrt(np.fft.fftshift((kval**2).sum(0)))
self.plot = self.PlotWidget(self)
vbox = QtGui.QVBoxLayout()
vbox.setSpacing(0)
vbox.addWidget(self.plot,10)
self.setLayout(vbox)
class PlotWidget(MyMplCanvas):
def __init__(self, data):
self.data = data
MyMplCanvas.__init__(self)
def computeInitialFigure(self):
self.axes = self.figure.add_axes([0.14,0.15,0.84,0.83])
plt.rc('font', family='serif', size=9)
#Pick off dimensions of the grid.
nx = self.data.polk.shape[2:-1]
npart = self.data.polk.shape[0]
self.axes.axhline(0)
ifreq = 0
kaxis = self.data.kgrid.reshape(-1)
for ipart in xrange(npart):
for jpart in xrange(npart):
intra = np.real(self.data.polk[ipart,jpart,...,ifreq])
self.axes.plot(kaxis[1:], intra.reshape(-1)[1:], ls='None',
marker=".",ms=1.5)
self.axes.set_xlabel(r"$q$")
self.axes.set_ylabel(r"$\chi_{nn}$")
self.axes.axis(xmax=16*math.pi/self.data.boxl[0])
| gpl-2.0 |
Harefoot/TurboQUALTX | ET_Utils/QUALTX_Utils/__init__.py | 1 | 14258 | #ET 20160210
#This script reads in a QUALTX output file and figures out what and where the DO sag is
import os
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
import sys
sys.path.insert(0,r'M:\Library\Python\Packages')
import struct
def parse_card_df(ldf,REACH_cols,fws,value_row_offset,startstring,endstring):
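    """Parse one fixed-width QUALTX 'card' block out of the line DataFrame.
    Parameters, as used by the code below:
      ldf              - DataFrame with a 'lines' column holding the raw output lines
      REACH_cols       - names to give the parsed columns, in field order
      fws              - list of fixed field widths, joined into a struct.unpack
                         format string such as '11s7s4s...'
      value_row_offset - offset from the startstring line to the first data row
      startstring      - the '$$$ DATA TYPE nn ...' header that opens the card
      endstring        - the matching 'ENDATAnn' line that closes it
    DATA TYPE 8 rows additionally get begin/end reach names split from the reach
    NAME on ' TO ', and DATA TYPE 21 names have '/' and '*' replaced by '_'.
    Returns a DataFrame with one row per card line and the columns in REACH_cols.
    """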
REACH_mask = ldf.index[ldf['lines'] == startstring]
columns = REACH_cols
count = REACH_mask[0]+value_row_offset
ENDmask = ldf.index[ldf['lines'] == endstring][0]
#print count
#print ENDmask
cldf = ldf[count:ENDmask].reset_index()
# print REACH_mask
# print startstring
# print count
# print cldf['lines'][min(cldf.index)]
# print cldf['lines'][max(cldf.index)]
# print endstring
# print ENDmask
blanks = np.asarray(np.zeros(len(cldf)),dtype = str)
blanks[:] = ""
for column in columns:
#print column
cldf[column] = blanks
#fws = [12,3,2,32,10,10,10]
fwstr = ""
for fw in fws:
fwstr = fwstr+str(fw)+'s'
for i in cldf.index:
templine = cldf['lines'][i]
#Scroll all the way to the right to see the end of the statement below
templine = (templine + ' ')[0:np.sum(fws)]
# print templine
values = struct.unpack(fwstr,templine)
values = [value.strip() for value in values]
if startstring =='$$$ DATA TYPE 8 (REACH IDENTIFICATION DATA) $$$':
            #Split the reach name by 'TO'
values[4] = values[4].split("TO")[0].strip()
temp = values[3].split(" TO ")
values.append(temp[0].strip())
try:
values.append(temp[1].strip())
except IndexError:
#print "Can't split by 'TO'"
values.append("")
pass
if startstring =='$$$ DATA TYPE 21 (HEADWATER DATA FOR DO, BOD, AND NITROGEN) $$$':
#Remove any illegal characters and replace with underscores
Illegal_chars = [r"/",r"*"]
for Illegal_char in Illegal_chars:
values[2] = '_'.join(values[2].split(Illegal_char))
for j in range(len(columns)):
#print i
#print columns[j]
#print values[j]
try:
cldf[columns[j]][i] = values[j]
except IndexError:
print "j ="+str(j)
print "i ="+str(i)
print "values ="
print values
print len(values)
print "columns ="
print columns
print len(columns)
        # cldf.to_csv(r'\\aus1.aus.apai\share\Projects\0380\003-01\2-0 Wrk Prod\2-8 MODELS\20160422_Model_Resegmentation\cldf.csv')  # debug dump; site-specific path
# if startstring =='$$$ DATA TYPE 21 (HEADWATER DATA FOR DO, BOD, AND NITROGEN) $$$':
# cldf.to_csv(r'\\aus1.aus.apai\share\Projects\0380\003-01\2-0 Wrk Prod\2-8 MODELS\20160422_Model_Resegmentation\cldf.csv')
# ldf.to_csv(r'\\aus1.aus.apai\share\Projects\0380\003-01\2-0 Wrk Prod\2-8 MODELS\20160422_Model_Resegmentation\ldf.csv')
#
cldf = cldf[columns]
return cldf
#f.close()
#def parse_QUALTXoutfile(inputfolder, QUALTXoutfile):
def parse_QUALTXoutfile(QUALTXoutfile):
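    """Parse a QUALTX output file and return its results as DataFrames.
    Returns (StreamNames, Rdf, Hdf, AllWQdf):
      StreamNames - stream names found in the 'WATER QUALITY CONSTITUENT VALUES'
                    reports (illegal '/' and '*' characters replaced by '_')
      Rdf         - DATA TYPE 8 reach identification card
      Hdf         - DATA TYPE 21 headwater card
      AllWQdf     - longitudinal water quality profile (WQC columns) per stream
    """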
#---------------------------------------------------------------------------------------------------------
#Hard code in the WQC headers
WQC = ['ELEM_NO.', 'ENDING_DIST', 'TEMP_DEG_C', 'SALN_PPT', 'CM-I_*', 'CM-II_*', 'DO_MG/L',
'BOD_MG/L', 'EBOD_MG/L', 'ORGN_MG/L', 'NH3_MG/L', 'NO3+2_MG/L', 'TOTN_MG/L',
'PHOS_MG/L', 'CHL_A_UG/L', 'MACRO_**', 'COLI_#/100ML', 'NCM_*']
#---------------------------------------------------------------------------------------------------------
#Figure out the original input file first
#Read all rows into a 1D array
# f = open(QUALTXoutfile,'r')
# lines = f.readlines()
# f.close()
#
# #Save lines as dataframe for querying
# Sdict = {'lines':lines}
# ldf = pd.DataFrame(Sdict)
#
#
# #remove the silly \n
# #and remove leading leading and trailing spaces
# for i in ldf.index:
# templine = ldf['lines'][i].strip()
# ldf['lines'][i] = templine.split('\n')[0]
#Use pandas to parse the outfile into lines (pandas can read urls)
#Use python engine because c engine cannot handle regex separators, hope it doesn't adversely affect performance by too much
#__main__:1: ParserWarning: Falling back to the 'python' engine because the 'c' engine does not support
#regex separators; you can avoid this warning by specifying engine='python'
#Note that doing the following will remove any blank lines. Therefore value offsets will need to be readjusted
ldf = pd.read_csv(QUALTXoutfile,names = ['lines'],sep = r"K1mp055ibe5tr1n6P4rs3r",engine = 'python')
ldf['CARD'] = ldf['lines']
ldf['CARD'][:] = ''
#We are only interested in reading results from the outputfile
#no need to read echos from input file
#Do a search for WATER QUALITY CONSTITUENT VALUES
WQSV_mask = ldf.index[ldf['lines'] == 'WATER QUALITY CONSTITUENT VALUES']
WQdf = ldf.copy()
WQC = ['Stream']+WQC
blanks = np.asarray(np.zeros(len(WQdf)),dtype = str)
blanks[:] = ""
for WQCe in WQC:
WQdf[WQCe] = np.zeros(len(WQdf))
WQdf['Stream'] = blanks
StreamNames = []
    #Loop through each report and create a WQ csv
row = 0
for i in WQSV_mask:
#Grab the name of the stream
StreamName = ldf['lines'][i-1].split(r':')[1].strip()
#Remove any illegal characters and replace with underscores
Illegal_chars = [r"/",r"*"]
for Illegal_char in Illegal_chars:
StreamName = '_'.join(StreamName.split(Illegal_char))
print StreamName
count = i+3
while True:
templine = ldf['lines'][count]
values = templine.split()
values = [StreamName]+values
if len(values)==len(WQC):
#f.write(StreamName+','+','.join(values)+'\n')
for j in range(len(WQC)):
WQdf[WQC[j]][row] = values[j]
row = row + 1
count = count + 1
else:
break
#f.close()
StreamNames.append(StreamName)
#Parse the reaches
#http://www.gossamer-threads.com/lists/python/python/93022
startstring = '$$$ DATA TYPE 8 (REACH IDENTIFICATION DATA) $$$'
#REACH_cols = ['CARD TYPE','REACH','ID','NAME','BEGIN REACH KM','END REACH KM','ELEM LENGTH KM','REACH LENGTH KM','ELEMS PER RCH','BEGIN ELEM NUM','END ELEM NUM']
REACH_cols = ['CARD TYPE','REACH','ID','NAME','BEGIN REACH KM','END REACH KM','ELEM LENGTH KM','REACH LENGTH KM',
'ELEMS PER RCH','BEGIN ELEM NUM','END ELEM NUM','BEGIN NAME','END NAME']
fws = [11,7,4,37,12,8,9,10,9,8,4]
value_row_offset = 4
endstring = 'ENDATA08'
Rdf = parse_card_df(ldf,REACH_cols,fws,value_row_offset,startstring,endstring)
#Parse the hydraulics
startstring = '$$$ DATA TYPE 9 (ADVECTIVE HYDRAULIC COEFFICIENTS) $$$'
REACH_cols = ['CARD TYPE','REACH','ID','VELOCITY_A','VELOCITY_B','DEPTH_C','DEPTH_D','DEPTH_E','MANNINGS_N']
fws = [15,7,4,15,15,15,15,15,17]
value_row_offset = 3
endstring = 'ENDATA09'
HYdf = parse_card_df(ldf,REACH_cols,fws,value_row_offset,startstring,endstring)
#Parse the headwaters
startstring = '$$$ DATA TYPE 21 (HEADWATER DATA FOR DO, BOD, AND NITROGEN) $$$'
REACH_cols = ['CARD TYPE','ELEMENT','NAME','DO','BOD','ORG-N','NH3','NO3+2']
fws = [14,10,25,7,10,10,10,10]
value_row_offset = 2
endstring = 'ENDATA21'
Hdf = parse_card_df(ldf,REACH_cols,fws,value_row_offset,startstring,endstring)
AllWQdf = WQdf[WQdf['Stream']!=""].reset_index()
AllWQdf = AllWQdf[WQC]
return StreamNames, Rdf, Hdf, AllWQdf
def Plot_QUALTX(StreamNames, Rdf, Hdf, AllWQdf,outputfolder,basename,Scenario,DOstd = np.nan,WQC_of_interest = ['DO_MG/L','BOD_MG/L','NH3_MG/L','NO3+2_MG/L','PHOS_MG/L'],loc = 1):
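    """Plot longitudinal water quality profiles for each stream in StreamNames.
    For every stream a PDF (and a PNG via ET_Utils.Plot_Utils.make_png) is written
    to outputfolder, with reach break points from Rdf drawn as dashed vertical
    lines; when DOstd is given, DOstd and DOstd - 0.2 mg/L are drawn as dashed
    red reference lines. Returns the list of matplotlib figures.
    """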
import ET_Utils.Plot_Utils
figs = []
for StreamName in StreamNames:
#Start Plotting
WQdf = AllWQdf[AllWQdf['Stream']==StreamName].copy()
SRdf = Rdf[Rdf['StreamName']==StreamName].reset_index()
outputpdf = os.path.join(outputfolder, StreamName+'.pdf')
pageorientation = 'landscape'
fig, pp, axisartist = ET_Utils.Plot_Utils.setup_page(outputpdf, pageorientation)
numrows = 1
numcols = 1
subplot_counter = 1
ax = fig.add_subplot(axisartist.Subplot(fig, numrows,numcols,subplot_counter))
#x_range = [max(WQdf['ENDING_DIST']),min(WQdf['ENDING_DIST'])]
elem_length = np.abs(WQdf['ENDING_DIST'][min(WQdf.index)+1]-WQdf['ENDING_DIST'][min(WQdf.index)])
x_range = [min(WQdf['ENDING_DIST']),max(WQdf['ENDING_DIST'])+elem_length]
#y_range = [0,25]
y_label = 'mg/L'
x_label = 'km'
ls = []
maxy = []
for tempWQC in WQC_of_interest:
maxy.append(max(WQdf[tempWQC]))
y_range = [0,max(maxy)]
x_labelsize = 15
y_labelsize = 15
ET_Utils.Plot_Utils.setup_axes(ax, x_range, y_range, x_label, y_label,x_labelsize = x_labelsize,y_labelsize = y_labelsize)#, x_tickinterval = x_tickinterval, y_tickinterval = y_tickinterval)
ax.axis["bottom"].label.set_pad(6) #specifies number of points between axis title and axis
ax.invert_xaxis()
#matplotlib.rcParams.update({'font.size': 22})
lws = np.zeros(len(WQC_of_interest))+2
plot_colors = ['blue','black','green','cyan','orange']
plot_symbols = ['-','-','-','-','-']
markersizes = np.zeros(len(WQC_of_interest))+1
for k in range(0,len(WQC_of_interest)):
ls.append(ax.plot(np.asarray(WQdf['ENDING_DIST']),np.asarray(WQdf[WQC_of_interest[k]]),plot_symbols[k], lw = lws[k],markersize=markersizes[k],
color = plot_colors[k],label = WQC_of_interest[k]))
#Show grid
#ax.grid(True,linestyle='-',color='0.4', zorder=1)
#Overplot reach start and end points of each reach
if len(SRdf) > 0:
Points1 = list(SRdf['BEGIN NAME'])
Rkms1 = list(SRdf['BEGIN REACH KM'])
if len(SRdf) > 1:
Points2 = [SRdf['END NAME'][SRdf.index[-1]]]
Rkms2 = [SRdf['END REACH KM'][SRdf.index[-1]]]
else:
Points2 = list(SRdf['END NAME'])
Rkms2 = list(SRdf['END REACH KM'])
Points = Points1 + Points2
#Rkms = Rkms1.append(Rkms2)
Rkms = Rkms1+Rkms2
for i in range(0,len(Points)):
ax.plot([Rkms[i],Rkms[i]],[-999,999],'--',color = 'k')
ax.text(Rkms[i],np.mean(y_range),Points[i],va = 'center',rotation = 'vertical')
#if len(DOstds) > 0:
        if not np.isnan(DOstd):
DOstds = [DOstd,DOstd-0.2]
vas = ['bottom','top']
for i in range(len(DOstds)):
ax.plot([-999,999],[DOstds[i],DOstds[i]],'--',color = 'r')
ax.text(np.mean(x_range),DOstds[i],"DO std = "+"{:4.1f}".format(DOstds[i])+" mg/L",va = vas[i],color = 'r')
#Plot legend
handles, labels = ax.get_legend_handles_labels()
#legend at top right corner, loc = 1
#legend at top left corner, loc = 2
#legend at bottom left corner, loc = 3
#legend at bottom right corner, loc = 4
ax.legend(handles, labels, loc=loc)
fig.text(0.1,0.05, "Water quality profiles for " +StreamName + '.',fontsize = 16)
fig.text(0.1,0.02, basename+' ('+Scenario+').',fontsize = 12)
figs.append(fig)
pp.savefig(fig)
pp.close()
ET_Utils.Plot_Utils.make_png(outputpdf)
#print "monkey"
return figs
#def Process_QUALTX(inputfolder,QUALTXoutfile,Scenario,DOstds = [],
def Process_QUALTX(QUALTXoutfile,Scenario,DOstd = np.nan,
WQC_of_interest = ['DO_MG/L','BOD_MG/L','NH3_MG/L','NO3+2_MG/L','PHOS_MG/L'],loc = 1,plot_pdf = 1):
#inputfile = os.path.join(inputfolder,QUALTXoutfile)
#inputfile = QUALTXoutfile
#StreamNames, Rdf, Hdf, AllWQdf = parse_QUALTXoutfile(outputfolder, inputfile)#,HYDRcsv,REACHcsv)
StreamNames, Rdf, Hdf, AllWQdf = parse_QUALTXoutfile(QUALTXoutfile)#,HYDRcsv,REACHcsv)
Hdfcols = list(Hdf.columns)
Hdf = pd.merge(Hdf,Rdf,left_on = 'ELEMENT',right_on = 'BEGIN ELEM NUM', how = "inner", suffixes = ['','_right']).reset_index()
Hdfcols.append('ID')
Hdf = Hdf[Hdfcols]
Rdfcols = list(Rdf.columns)
Rdf = pd.merge(Rdf,Hdf,on = 'ID', how = "outer", suffixes = ['','_right']).reset_index()
Rdf['StreamName'] = Rdf['NAME_right']
Rdfcols.append('StreamName')
Rdf = Rdf[Rdfcols]
if plot_pdf == 1:
outputfolder = os.path.dirname(QUALTXoutfile)
basename = os.path.basename(QUALTXoutfile)
outputsubfolder = "_".join(basename.split("."))+"_plots"
outputfolder = os.path.join(outputfolder,outputsubfolder)
if os.path.exists(outputfolder) == False:
os.mkdir(outputfolder)
Rdf.to_csv(os.path.join(outputfolder,'REACH.csv'),index = False)
Hdf.to_csv(os.path.join(outputfolder,'HEAD2.csv'),index = False)
AllWQdf.to_csv(os.path.join(outputfolder,'ALLWQ.csv'),index = False)
        Plot_QUALTX(StreamNames, Rdf, Hdf, AllWQdf,outputfolder,basename,Scenario,DOstd = DOstd,WQC_of_interest = WQC_of_interest,loc = loc)
return StreamNames, Rdf, Hdf, AllWQdf
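# Minimal usage sketch, assuming a hypothetical QUALTX output file path and
# scenario label (neither comes from the original project):
if __name__ == '__main__':
    example_outfile = r'C:\QUALTX\example_run.out'  # placeholder path
    # With plot_pdf=1, REACH.csv, HEAD2.csv, ALLWQ.csv and the per-stream plots
    # are written to a subfolder (named after the output file) next to it.
    streams, reaches, headwaters, wq = Process_QUALTX(example_outfile,
                                                      'Example scenario',
                                                      DOstd=5.0,
                                                      plot_pdf=1)
    print streams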
| mit |
louispotok/pandas | pandas/tests/indexes/common.py | 2 | 41556 | # -*- coding: utf-8 -*-
import pytest
from pandas import compat
from pandas.compat import PY3
import numpy as np
from pandas import (Series, Index, Float64Index, Int64Index, UInt64Index,
RangeIndex, MultiIndex, CategoricalIndex, DatetimeIndex,
TimedeltaIndex, PeriodIndex, IntervalIndex, isna)
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas.core.dtypes.common import needs_i8_conversion
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas._libs.tslib import iNaT
import pandas.util.testing as tm
import pandas as pd
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'nbytes']
def setup_indices(self):
for name, idx in self.indices.items():
setattr(self, name, idx)
def verify_pickle(self, indices):
unpickled = tm.round_trip_pickle(indices)
assert indices.equals(unpickled)
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
pytest.raises(TypeError, self._holder)
def test_to_series(self):
# assert that we are creating a copy of the index
idx = self.create_index()
s = idx.to_series()
assert s.values is not idx.values
assert s.index is not idx
assert s.name == idx.name
def test_to_series_with_arguments(self):
# GH18699
# index kwarg
idx = self.create_index()
s = idx.to_series(index=idx)
assert s.values is not idx.values
assert s.index is idx
assert s.name == idx.name
# name kwarg
idx = self.create_index()
s = idx.to_series(name='__test')
assert s.values is not idx.values
assert s.index is not idx
assert s.name != idx.name
def test_to_frame(self):
# see gh-15230
idx = self.create_index()
name = idx.name or 0
df = idx.to_frame()
assert df.index is idx
assert len(df.columns) == 1
assert df.columns[0] == name
assert df[name].values is not idx.values
df = idx.to_frame(index=False)
assert df.index is not idx
def test_shift(self):
# GH8083 test the base class for shift
idx = self.create_index()
pytest.raises(NotImplementedError, idx.shift, 1)
pytest.raises(NotImplementedError, idx.shift, 1, 2)
def test_create_index_existing_name(self):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
expected = self.create_index()
if not isinstance(expected, MultiIndex):
expected.name = 'foo'
result = pd.Index(expected)
tm.assert_index_equal(result, expected)
result = pd.Index(expected, name='bar')
expected.name = 'bar'
tm.assert_index_equal(result, expected)
else:
expected.names = ['foo', 'bar']
result = pd.Index(expected)
tm.assert_index_equal(
result, Index(Index([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('baz', 'two'),
('qux', 'one'), ('qux', 'two')],
dtype='object'),
names=['foo', 'bar']))
result = pd.Index(expected, names=['A', 'B'])
tm.assert_index_equal(
result,
Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')],
dtype='object'), names=['A', 'B']))
def test_numeric_compat(self):
idx = self.create_index()
tm.assert_raises_regex(TypeError, "cannot perform __mul__",
lambda: idx * 1)
tm.assert_raises_regex(TypeError, "cannot perform __rmul__",
lambda: 1 * idx)
div_err = "cannot perform __truediv__" if PY3 \
else "cannot perform __div__"
tm.assert_raises_regex(TypeError, div_err, lambda: idx / 1)
div_err = div_err.replace(' __', ' __r')
tm.assert_raises_regex(TypeError, div_err, lambda: 1 / idx)
tm.assert_raises_regex(TypeError, "cannot perform __floordiv__",
lambda: idx // 1)
tm.assert_raises_regex(TypeError, "cannot perform __rfloordiv__",
lambda: 1 // idx)
def test_logical_compat(self):
idx = self.create_index()
tm.assert_raises_regex(TypeError, 'cannot perform all',
lambda: idx.all())
tm.assert_raises_regex(TypeError, 'cannot perform any',
lambda: idx.any())
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_reindex_base(self):
idx = self.create_index()
expected = np.arange(idx.size, dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with tm.assert_raises_regex(ValueError, 'Invalid fill method'):
idx.get_indexer(idx, method='invalid')
def test_get_indexer_consistency(self):
# See GH 16819
for name, index in self.indices.items():
if isinstance(index, IntervalIndex):
continue
if index.is_unique or isinstance(index, CategoricalIndex):
indexer = index.get_indexer(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
else:
e = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, e):
indexer = index.get_indexer(index[0:2])
indexer, _ = index.get_indexer_non_unique(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
def test_ndarray_compat_properties(self):
idx = self.create_index()
assert idx.T.equals(idx)
assert idx.transpose().equals(idx)
values = idx.values
for prop in self._compat_props:
assert getattr(idx, prop) == getattr(values, prop)
# test for validity
idx.nbytes
idx.values.nbytes
def test_repr_roundtrip(self):
idx = self.create_index()
tm.assert_index_equal(eval(repr(idx)), idx)
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
assert "'foo'" in str(idx)
assert idx.__class__.__name__ in str(idx)
def test_dtype_str(self, indices):
dtype = indices.dtype_str
assert isinstance(dtype, compat.string_types)
assert dtype == str(indices.dtype)
def test_repr_max_seq_item_setting(self):
# GH10182
idx = self.create_index()
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert '...' not in str(idx)
def test_wrong_number_names(self, indices):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
tm.assert_raises_regex(ValueError, "^Length", testit, indices)
def test_set_name_methods(self, indices):
new_name = "This is the new name for this index"
        # don't test a MultiIndex here (as it's tested separately)
if isinstance(indices, MultiIndex):
return
original_name = indices.name
new_ind = indices.set_names([new_name])
assert new_ind.name == new_name
assert indices.name == original_name
res = indices.rename(new_name, inplace=True)
# should return None
assert res is None
assert indices.name == new_name
assert indices.names == [new_name]
# with tm.assert_raises_regex(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with tm.assert_raises_regex(ValueError, "Level must be None"):
indices.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
indices.rename(name, inplace=True)
assert indices.name == name
assert indices.names == [name]
def test_hash_error(self, indices):
index = indices
tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__, hash, indices)
def test_copy_name(self):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
for name, index in compat.iteritems(self.indices):
if isinstance(index, MultiIndex):
continue
first = index.__class__(index, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ.
assert index.equals(first)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if not isinstance(index, CategoricalIndex):
# See gh-13365
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_ensure_copied_data(self):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
for name, index in compat.iteritems(self.indices):
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs['freq'] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
# RangeIndex cannot be initialized from data
# MultiIndex and CategoricalIndex are tested separately
continue
index_type = index.__class__
result = index_type(index.values, copy=True, **init_kwargs)
tm.assert_index_equal(index, result)
tm.assert_numpy_array_equal(index.values, result.values,
check_same='copy')
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type(ordinal=index.asi8, copy=False,
**init_kwargs)
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values,
check_same='same')
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
def test_copy_and_deepcopy(self, indices):
from copy import copy, deepcopy
if isinstance(indices, MultiIndex):
return
for func in (copy, deepcopy):
idx_copy = func(indices)
assert idx_copy is not indices
assert idx_copy.equals(indices)
new_copy = indices.copy(deep=True, name="banana")
assert new_copy.name == "banana"
def test_duplicates(self, indices):
if type(indices) is not self._holder:
return
if not len(indices) or isinstance(indices, MultiIndex):
return
idx = self._holder([indices[0]] * 5)
assert not idx.is_unique
assert idx.has_duplicates
def test_unique(self, indices):
        # don't test a MultiIndex here (as it's tested separately)
# don't test a CategoricalIndex because categories change (GH 18291)
if isinstance(indices, (MultiIndex, CategoricalIndex)):
return
# GH 17896
expected = indices.drop_duplicates()
for level in 0, indices.name, None:
result = indices.unique(level=level)
tm.assert_index_equal(result, expected)
for level in 3, 'wrong':
pytest.raises((IndexError, KeyError), indices.unique, level=level)
def test_unique_na(self):
idx = pd.Index([2, np.nan, 2, 1], name='my_index')
expected = pd.Index([2, np.nan, 1], name='my_index')
result = idx.unique()
tm.assert_index_equal(result, expected)
def test_get_unique_index(self, indices):
# MultiIndex tested separately
if not len(indices) or isinstance(indices, MultiIndex):
return
idx = indices[[0] * 5]
idx_unique = indices[[0]]
# We test against `idx_unique`, so first we make sure it's unique
# and doesn't contain nans.
assert idx_unique.is_unique
try:
assert not idx_unique.hasnans
except NotImplementedError:
pass
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, idx_unique)
# nans:
if not indices._can_hold_na:
return
if needs_i8_conversion(indices):
vals = indices.asi8[[0] * 5]
vals[0] = iNaT
else:
vals = indices.values[[0] * 5]
vals[0] = np.nan
vals_unique = vals[:2]
idx_nan = indices._shallow_copy(vals)
idx_unique_nan = indices._shallow_copy(vals_unique)
assert idx_unique_nan.is_unique
assert idx_nan.dtype == indices.dtype
assert idx_unique_nan.dtype == indices.dtype
for dropna, expected in zip([False, True],
[idx_unique_nan,
idx_unique]):
for i in [idx_nan, idx_unique_nan]:
result = i._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, expected)
def test_sort(self, indices):
pytest.raises(TypeError, indices.sort)
def test_mutability(self, indices):
if not len(indices):
return
pytest.raises(TypeError, indices.__setitem__, 0, indices[0])
def test_view(self, indices):
assert indices.view().name == indices.name
def test_compat(self, indices):
assert indices.tolist() == list(indices)
def test_memory_usage(self):
for name, index in compat.iteritems(self.indices):
result = index.memory_usage()
if len(index):
index.get_loc(index[0])
result2 = index.memory_usage()
result3 = index.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(index, (RangeIndex, IntervalIndex)):
assert result2 > result
if index.inferred_type == 'object':
assert result3 > result2
else:
# we report 0 for no-length
assert result == 0
def test_argsort(self):
for k, ind in self.indices.items():
# separately tested
if k in ['catIndex']:
continue
result = ind.argsort()
expected = np.array(ind).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
for k, ind in self.indices.items():
result = np.argsort(ind)
expected = ind.argsort()
tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
# validation via their 'values' attribute as
# defined in pandas.core.indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
            if isinstance(ind, (CategoricalIndex, RangeIndex)):
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(ValueError, msg,
np.argsort, ind, axis=1)
msg = "the 'kind' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.argsort,
ind, kind='mergesort')
msg = "the 'order' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.argsort,
ind, order=('a', 'b'))
def test_pickle(self, indices):
self.verify_pickle(indices)
original_name, indices.name = indices.name, 'foo'
self.verify_pickle(indices)
indices.name = original_name
def test_take(self):
indexer = [4, 3, 0, 2]
for k, ind in self.indices.items():
# separate
if k in ['boolIndex', 'tuples', 'empty']:
continue
result = ind.take(indexer)
expected = ind[indexer]
assert result.equals(expected)
if not isinstance(ind,
(DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# GH 10791
with pytest.raises(AttributeError):
ind.freq
def test_take_invalid_kwargs(self):
idx = self.create_index()
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
def test_repeat(self):
rep = 2
i = self.create_index()
expected = pd.Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
i = self.create_index()
rep = np.arange(len(i))
expected = pd.Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
def test_numpy_repeat(self):
rep = 2
i = self.create_index()
expected = i.repeat(rep)
tm.assert_index_equal(np.repeat(i, rep), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.repeat,
i, rep, axis=0)
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where(self, klass):
i = self.create_index()
cond = [True] * len(i)
result = i.where(klass(cond))
expected = i
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(i[1:])
expected = pd.Index([i._na_value] + i[1:].tolist(), dtype=i.dtype)
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_setops_errorcases(self):
for name, idx in compat.iteritems(self.indices):
            # non-iterable input
cases = [0.5, 'xxx']
methods = [idx.intersection, idx.union, idx.difference,
idx.symmetric_difference]
for method in methods:
for case in cases:
tm.assert_raises_regex(TypeError,
"Input must be Index "
"or array-like",
method, case)
def test_intersection_base(self):
for name, idx in compat.iteritems(self.indices):
first = idx[:5]
second = idx[:3]
intersect = first.intersection(second)
if isinstance(idx, CategoricalIndex):
pass
else:
assert tm.equalContents(intersect, second)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with tm.assert_raises_regex(ValueError, msg):
result = first.intersection(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.intersection(case)
assert tm.equalContents(result, second)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
result = first.intersection([1, 2, 3])
def test_union_base(self):
for name, idx in compat.iteritems(self.indices):
first = idx[3:]
second = idx[:5]
everything = idx
union = first.union(second)
assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with tm.assert_raises_regex(ValueError, msg):
result = first.union(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.union(case)
assert tm.equalContents(result, everything)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
result = first.union([1, 2, 3])
def test_difference_base(self):
for name, idx in compat.iteritems(self.indices):
first = idx[2:]
second = idx[:4]
answer = idx[4:]
result = first.difference(second)
if isinstance(idx, CategoricalIndex):
pass
else:
assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with tm.assert_raises_regex(ValueError, msg):
result = first.difference(case)
elif isinstance(idx, CategoricalIndex):
pass
elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
assert result.__class__ == answer.__class__
tm.assert_numpy_array_equal(result.sort_values().asi8,
answer.sort_values().asi8)
else:
result = first.difference(case)
assert tm.equalContents(result, answer)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
result = first.difference([1, 2, 3])
def test_symmetric_difference(self):
for name, idx in compat.iteritems(self.indices):
first = idx[1:]
second = idx[:-1]
if isinstance(idx, CategoricalIndex):
pass
else:
answer = idx[[0, -1]]
result = first.symmetric_difference(second)
assert tm.equalContents(result, answer)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with tm.assert_raises_regex(ValueError, msg):
result = first.symmetric_difference(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.symmetric_difference(case)
assert tm.equalContents(result, answer)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with tm.assert_raises_regex(TypeError, msg):
first.symmetric_difference([1, 2, 3])
def test_insert_base(self):
for name, idx in compat.iteritems(self.indices):
result = idx[1:4]
if not len(idx):
continue
# test 0th element
assert idx[0:4].equals(result.insert(0, idx[0]))
def test_delete_base(self):
for name, idx in compat.iteritems(self.indices):
if not len(idx):
continue
if isinstance(idx, RangeIndex):
# tested in class
continue
expected = idx[1:]
result = idx.delete(0)
assert result.equals(expected)
assert result.name == expected.name
expected = idx[:-1]
result = idx.delete(-1)
assert result.equals(expected)
assert result.name == expected.name
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
result = idx.delete(len(idx))
def test_equals(self):
for name, idx in compat.iteritems(self.indices):
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(np.array(idx))
# Cannot pass in non-int64 dtype to RangeIndex
if not isinstance(idx, RangeIndex):
same_values = Index(idx, dtype=object)
assert idx.equals(same_values)
assert same_values.equals(idx)
if idx.nlevels == 1:
# do not test MultiIndex
assert not idx.equals(pd.Series(idx))
def test_equals_op(self):
# GH9947, GH10637
index_a = self.create_index()
if isinstance(index_a, PeriodIndex):
return
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
series_b = Series(array_b)
series_c = Series(array_c)
series_d = Series(array_d)
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == series_b
tm.assert_numpy_array_equal(index_a == series_a, expected1)
tm.assert_numpy_array_equal(index_a == series_c, expected2)
# cases where length is 1 for one of them
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == index_d
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == series_d
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == array_d
msg = "Can only compare identically-labeled Series objects"
with tm.assert_raises_regex(ValueError, msg):
series_a == series_d
with tm.assert_raises_regex(ValueError, "Lengths must match"):
series_a == array_d
# comparing with a scalar should broadcast; note that we are excluding
# MultiIndex because in this case each item in the index is a tuple of
# length 2, and therefore is considered an array of length 2 in the
# comparison instead of a scalar
if not isinstance(index_a, MultiIndex):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
tm.assert_series_equal(series_a == item, Series(expected3))
def test_numpy_ufuncs(self):
# test ufuncs of numpy 1.9.2. see:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html
# some functions are skipped because it may return different result
# for unicode input depending on numpy version
for name, idx in compat.iteritems(self.indices):
for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10,
np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin,
np.arccos, np.arctan, np.sinh, np.cosh, np.tanh,
np.arcsinh, np.arccosh, np.arctanh, np.deg2rad,
np.rad2deg]:
if isinstance(idx, DatetimeIndexOpsMixin):
# raise TypeError or ValueError (PeriodIndex)
# PeriodIndex behavior should be changed in future version
with pytest.raises(Exception):
with np.errstate(all='ignore'):
func(idx)
elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)):
# coerces to float (e.g. np.sin)
with np.errstate(all='ignore'):
result = func(idx)
exp = Index(func(idx.values), name=idx.name)
tm.assert_index_equal(result, exp)
assert isinstance(result, pd.Float64Index)
else:
# raise AttributeError or TypeError
if len(idx) == 0:
continue
else:
with pytest.raises(Exception):
with np.errstate(all='ignore'):
func(idx)
for func in [np.isfinite, np.isinf, np.isnan, np.signbit]:
if isinstance(idx, DatetimeIndexOpsMixin):
# raise TypeError or ValueError (PeriodIndex)
with pytest.raises(Exception):
func(idx)
elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)):
# Results in bool array
result = func(idx)
assert isinstance(result, np.ndarray)
assert not isinstance(result, Index)
else:
if len(idx) == 0:
continue
else:
with pytest.raises(Exception):
func(idx)
def test_hasnans_isnans(self):
# GH 11343, added tests for hasnans / isnans
for name, index in self.indices.items():
if isinstance(index, MultiIndex):
pass
else:
idx = index.copy()
                # cases in indices don't include NaN
expected = np.array([False] * len(idx), dtype=bool)
tm.assert_numpy_array_equal(idx._isnan, expected)
assert not idx.hasnans
idx = index.copy()
values = idx.values
if len(index) == 0:
continue
elif isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
continue
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = index.__class__(values, freq=index.freq)
else:
idx = index.__class__(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans
def test_fillna(self):
# GH 11343
for name, index in self.indices.items():
if len(index) == 0:
pass
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isna is not defined for MultiIndex"
with tm.assert_raises_regex(NotImplementedError, msg):
idx.fillna(idx[0])
else:
idx = index.copy()
result = idx.fillna(idx[0])
tm.assert_index_equal(result, idx)
assert result is not idx
msg = "'value' must be a scalar, passed: "
with tm.assert_raises_regex(TypeError, msg):
idx.fillna([idx[0]])
idx = index.copy()
values = idx.values
if isinstance(index, DatetimeIndexOpsMixin):
values[1] = iNaT
elif isinstance(index, (Int64Index, UInt64Index)):
continue
else:
values[1] = np.nan
if isinstance(index, PeriodIndex):
idx = index.__class__(values, freq=index.freq)
else:
idx = index.__class__(values)
expected = np.array([False] * len(idx), dtype=bool)
expected[1] = True
tm.assert_numpy_array_equal(idx._isnan, expected)
assert idx.hasnans
def test_nulls(self):
# this is really a smoke test for the methods
        # as these are adequately tested for functionality elsewhere
for name, index in self.indices.items():
if len(index) == 0:
tm.assert_numpy_array_equal(
index.isna(), np.array([], dtype=bool))
elif isinstance(index, MultiIndex):
idx = index.copy()
msg = "isna is not defined for MultiIndex"
with tm.assert_raises_regex(NotImplementedError, msg):
idx.isna()
else:
if not index.hasnans:
tm.assert_numpy_array_equal(
index.isna(), np.zeros(len(index), dtype=bool))
tm.assert_numpy_array_equal(
index.notna(), np.ones(len(index), dtype=bool))
else:
result = isna(index)
tm.assert_numpy_array_equal(index.isna(), result)
tm.assert_numpy_array_equal(index.notna(), ~result)
def test_empty(self):
# GH 15270
index = self.create_index()
assert not index.empty
assert index[:0].empty
def test_join_self_unique(self, join_type):
index = self.create_index()
if index.is_unique:
joined = index.join(index, how=join_type)
assert (index == joined).all()
def test_searchsorted_monotonic(self, indices):
# GH17271
# not implemented for tuple searches in MultiIndex
# or Intervals searches in IntervalIndex
if isinstance(indices, (MultiIndex, IntervalIndex)):
return
# nothing to test if the index is empty
if indices.empty:
return
value = indices[0]
# determine the expected results (handle dupes for 'right')
expected_left, expected_right = 0, (indices == value).argmin()
if expected_right == 0:
# all values are the same, expected_right should be length
expected_right = len(indices)
# test _searchsorted_monotonic in all cases
# test searchsorted only for increasing
if indices.is_monotonic_increasing:
ssm_left = indices._searchsorted_monotonic(value, side='left')
assert expected_left == ssm_left
ssm_right = indices._searchsorted_monotonic(value, side='right')
assert expected_right == ssm_right
ss_left = indices.searchsorted(value, side='left')
assert expected_left == ss_left
ss_right = indices.searchsorted(value, side='right')
assert expected_right == ss_right
elif indices.is_monotonic_decreasing:
ssm_left = indices._searchsorted_monotonic(value, side='left')
assert expected_left == ssm_left
ssm_right = indices._searchsorted_monotonic(value, side='right')
assert expected_right == ssm_right
else:
# non-monotonic should raise.
with pytest.raises(ValueError):
indices._searchsorted_monotonic(value, side='left')
def test_map(self):
# callable
index = self.create_index()
# we don't infer UInt64
if isinstance(index, pd.UInt64Index):
expected = index.astype('int64')
else:
expected = index
result = index.map(lambda x: x)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: pd.Series(values, index)])
def test_map_dictlike(self, mapper):
index = self.create_index()
if isinstance(index, (pd.CategoricalIndex, pd.IntervalIndex)):
pytest.skip("skipping tests for {}".format(type(index)))
identity = mapper(index.values, index)
# we don't infer to UInt64 for a dict
if isinstance(index, pd.UInt64Index) and isinstance(identity, dict):
expected = index.astype('int64')
else:
expected = index
result = index.map(identity)
tm.assert_index_equal(result, expected)
# empty mappable
expected = pd.Index([np.nan] * len(index))
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
def test_putmask_with_wrong_mask(self):
# GH18368
index = self.create_index()
with pytest.raises(ValueError):
            index.putmask(np.ones(len(index) + 1, np.bool_), 1)
        with pytest.raises(ValueError):
            index.putmask(np.ones(len(index) - 1, np.bool_), 1)
with pytest.raises(ValueError):
index.putmask('foo', 1)
@pytest.mark.parametrize('copy', [True, False])
@pytest.mark.parametrize('name', [None, 'foo'])
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, copy, name, ordered):
# GH 18630
index = self.create_index()
if name:
index = index.rename(name)
# standard categories
dtype = CategoricalDtype(ordered=ordered)
result = index.astype(dtype, copy=copy)
expected = CategoricalIndex(index.values, name=name, ordered=ordered)
tm.assert_index_equal(result, expected)
# non-standard categories
dtype = CategoricalDtype(index.unique().tolist()[:-1], ordered)
result = index.astype(dtype, copy=copy)
expected = CategoricalIndex(index.values, name=name, dtype=dtype)
tm.assert_index_equal(result, expected)
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
result = index.astype('category', copy=copy)
expected = CategoricalIndex(index.values, name=name)
tm.assert_index_equal(result, expected)
| bsd-3-clause |
ldirer/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
ilayn/scipy | scipy/linalg/basic.py | 7 | 67310 | #
# Author: Pearu Peterson, March 2002
#
# w/ additions by Travis Oliphant, March 2002
# and Jake Vanderplas, August 2012
from warnings import warn
import numpy as np
from numpy import atleast_1d, atleast_2d
from .flinalg import get_flinalg_funcs
from .lapack import get_lapack_funcs, _compute_lwork
from .misc import LinAlgError, _datacopied, LinAlgWarning
from .decomp import _asarray_validated
from . import decomp, decomp_svd
from ._solve_toeplitz import levinson
__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
'pinv', 'pinv2', 'pinvh', 'matrix_balance', 'matmul_toeplitz']
# Linear equations
def _solve_check(n, info, lamch=None, rcond=None):
""" Check arguments during the different steps of the solution phase """
if info < 0:
raise ValueError('LAPACK reported an illegal value in {}-th argument'
'.'.format(-info))
elif 0 < info:
raise LinAlgError('Matrix is singular.')
if lamch is None:
return
E = lamch('E')
if rcond < E:
warn('Ill-conditioned matrix (rcond={:.6g}): '
'result may not be accurate.'.format(rcond),
LinAlgWarning, stacklevel=3)
def solve(a, b, sym_pos=False, lower=False, overwrite_a=False,
overwrite_b=False, debug=None, check_finite=True, assume_a='gen',
transposed=False):
"""
    Solves the linear equation set ``a * x = b`` for the unknown ``x``
    for square matrix ``a``.
If the data matrix is known to be a particular type then supplying the
corresponding string to ``assume_a`` key chooses the dedicated solver.
The available options are
=================== ========
generic matrix 'gen'
symmetric 'sym'
hermitian 'her'
positive definite 'pos'
=================== ========
If omitted, ``'gen'`` is the default structure.
    The datatype of the arrays defines which solver is called regardless
of the values. In other words, even when the complex array entries have
precisely zero imaginary parts, the complex solver will be called based
on the data type of the array.
Parameters
----------
a : (N, N) array_like
Square input data
b : (N, NRHS) array_like
Input data for the right hand side.
sym_pos : bool, optional
        Assume `a` is symmetric and positive definite. This keyword is
        deprecated and using ``assume_a = 'pos'`` is recommended instead. The
        functionality is the same. It will be removed in the future.
lower : bool, optional
        If True, only the data contained in the lower triangle of `a` is used.
        Default is to use the upper triangle. (ignored for ``'gen'``)
overwrite_a : bool, optional
Allow overwriting data in `a` (may enhance performance).
Default is False.
overwrite_b : bool, optional
Allow overwriting data in `b` (may enhance performance).
Default is False.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
assume_a : str, optional
Valid entries are explained above.
    transposed : bool, optional
        If True, solve ``a^T x = b`` for real matrices; raises
        `NotImplementedError` for complex matrices (only when True).
Returns
-------
x : (N, NRHS) ndarray
The solution array.
Raises
------
ValueError
If size mismatches detected or input a is not square.
LinAlgError
If the matrix is singular.
LinAlgWarning
If an ill-conditioned input a is detected.
NotImplementedError
If transposed is True and input a is a complex matrix.
Examples
--------
Given `a` and `b`, solve for `x`:
>>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
>>> b = np.array([2, 4, -1])
>>> from scipy import linalg
>>> x = linalg.solve(a, b)
>>> x
array([ 2., -2., 9.])
>>> np.dot(a, x) == b
array([ True, True, True], dtype=bool)
Notes
-----
If the input b matrix is a 1-D array with N elements, when supplied
together with an NxN input a, it is assumed as a valid column vector
despite the apparent size mismatch. This is compatible with the
numpy.dot() behavior and the returned result is still 1-D array.
The generic, symmetric, Hermitian and positive definite solutions are
obtained via calling ?GESV, ?SYSV, ?HESV, and ?POSV routines of
LAPACK respectively.
"""
# Flags for 1-D or N-D right-hand side
b_is_1D = False
a1 = atleast_2d(_asarray_validated(a, check_finite=check_finite))
b1 = atleast_1d(_asarray_validated(b, check_finite=check_finite))
n = a1.shape[0]
overwrite_a = overwrite_a or _datacopied(a1, a)
overwrite_b = overwrite_b or _datacopied(b1, b)
if a1.shape[0] != a1.shape[1]:
raise ValueError('Input a needs to be a square matrix.')
if n != b1.shape[0]:
# Last chance to catch 1x1 scalar a and 1-D b arrays
if not (n == 1 and b1.size != 0):
raise ValueError('Input b has to have same number of rows as '
'input a')
# accommodate empty arrays
if b1.size == 0:
return np.asfortranarray(b1.copy())
# regularize 1-D b arrays to 2D
if b1.ndim == 1:
if n == 1:
b1 = b1[None, :]
else:
b1 = b1[:, None]
b_is_1D = True
# Backwards compatibility - old keyword.
if sym_pos:
assume_a = 'pos'
if assume_a not in ('gen', 'sym', 'her', 'pos'):
raise ValueError('{} is not a recognized matrix structure'
''.format(assume_a))
# for a real matrix, describe it as "symmetric", not "hermitian"
# (lapack doesn't know what to do with real hermitian matrices)
if assume_a == 'her' and not np.iscomplexobj(a1):
assume_a = 'sym'
# Deprecate keyword "debug"
if debug is not None:
warn('Use of the "debug" keyword is deprecated '
'and this keyword will be removed in future '
'versions of SciPy.', DeprecationWarning, stacklevel=2)
# Get the correct lamch function.
# The LAMCH functions only exists for S and D
# So for complex values we have to convert to real/double.
if a1.dtype.char in 'fF': # single precision
lamch = get_lapack_funcs('lamch', dtype='f')
else:
lamch = get_lapack_funcs('lamch', dtype='d')
# Currently we do not have the other forms of the norm calculators
# lansy, lanpo, lanhe.
# However, in any case they only reduce computations slightly...
lange = get_lapack_funcs('lange', (a1,))
# Since the I-norm and 1-norm are the same for symmetric matrices
# we can collect them all in this one call
# Note however, that when issuing 'gen' and form!='none', then
# the I-norm should be used
if transposed:
trans = 1
norm = 'I'
if np.iscomplexobj(a1):
raise NotImplementedError('scipy.linalg.solve can currently '
'not solve a^T x = b or a^H x = b '
'for complex matrices.')
else:
trans = 0
norm = '1'
anorm = lange(norm, a1)
# Generalized case 'gesv'
if assume_a == 'gen':
gecon, getrf, getrs = get_lapack_funcs(('gecon', 'getrf', 'getrs'),
(a1, b1))
lu, ipvt, info = getrf(a1, overwrite_a=overwrite_a)
_solve_check(n, info)
x, info = getrs(lu, ipvt, b1,
trans=trans, overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = gecon(lu, anorm, norm=norm)
# Hermitian case 'hesv'
elif assume_a == 'her':
hecon, hesv, hesv_lw = get_lapack_funcs(('hecon', 'hesv',
'hesv_lwork'), (a1, b1))
lwork = _compute_lwork(hesv_lw, n, lower)
lu, ipvt, x, info = hesv(a1, b1, lwork=lwork,
lower=lower,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = hecon(lu, ipvt, anorm)
# Symmetric case 'sysv'
elif assume_a == 'sym':
sycon, sysv, sysv_lw = get_lapack_funcs(('sycon', 'sysv',
'sysv_lwork'), (a1, b1))
lwork = _compute_lwork(sysv_lw, n, lower)
lu, ipvt, x, info = sysv(a1, b1, lwork=lwork,
lower=lower,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = sycon(lu, ipvt, anorm)
# Positive definite case 'posv'
else:
pocon, posv = get_lapack_funcs(('pocon', 'posv'),
(a1, b1))
lu, x, info = posv(a1, b1, lower=lower,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = pocon(lu, anorm)
_solve_check(n, info, lamch, rcond)
if b_is_1D:
x = x.ravel()
return x
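# Illustrative sketch, not part of SciPy's public API: the helper below is a
# hypothetical example showing that ``assume_a`` only selects the LAPACK
# driver (?POSV instead of ?GESV here, per the Notes above); for a symmetric
# positive definite matrix both drivers return the same solution to round-off.
def _example_solve_assume_a():
    """Illustrative sketch only; compare the 'gen' and 'pos' drivers."""
    a = np.array([[4., 2., 0.],
                  [2., 5., 1.],
                  [0., 1., 3.]])
    b = np.array([1., 2., 3.])
    x_gen = solve(a, b)                  # generic LU-based ?GESV path
    x_pos = solve(a, b, assume_a='pos')  # Cholesky-based ?POSV path
    assert np.allclose(x_gen, x_pos)
    return x_gen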
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
overwrite_b=False, debug=None, check_finite=True):
"""
Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.
Parameters
----------
a : (M, M) array_like
A triangular matrix
b : (M,) or (M, N) array_like
Right-hand side matrix in `a x = b`
lower : bool, optional
Use only data contained in the lower triangle of `a`.
Default is to use upper triangle.
trans : {0, 1, 2, 'N', 'T', 'C'}, optional
Type of system to solve:
======== =========
trans system
======== =========
0 or 'N' a x = b
1 or 'T' a^T x = b
2 or 'C' a^H x = b
======== =========
unit_diagonal : bool, optional
If True, diagonal elements of `a` are assumed to be 1 and
will not be referenced.
overwrite_b : bool, optional
Allow overwriting data in `b` (may enhance performance)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, N) ndarray
Solution to the system `a x = b`. Shape of return matches `b`.
Raises
------
LinAlgError
If `a` is singular
Notes
-----
.. versionadded:: 0.9.0
Examples
--------
Solve the lower triangular system a x = b, where::
[3 0 0 0] [4]
a = [2 1 0 0] b = [2]
[1 0 1 0] [4]
[1 1 1 1] [2]
>>> from scipy.linalg import solve_triangular
>>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
>>> b = np.array([4, 2, 4, 2])
>>> x = solve_triangular(a, b, lower=True)
>>> x
array([ 1.33333333, -0.66666667, 2.66666667, -1.33333333])
>>> a.dot(x) # Check the result
array([ 4., 2., 4., 2.])
"""
# Deprecate keyword "debug"
if debug is not None:
warn('Use of the "debug" keyword is deprecated '
'and this keyword will be removed in the future '
'versions of SciPy.', DeprecationWarning, stacklevel=2)
a1 = _asarray_validated(a, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
if a1.shape[0] != b1.shape[0]:
raise ValueError('shapes of a {} and b {} are incompatible'
.format(a1.shape, b1.shape))
overwrite_b = overwrite_b or _datacopied(b1, b)
if debug:
print('solve:overwrite_b=', overwrite_b)
trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
trtrs, = get_lapack_funcs(('trtrs',), (a1, b1))
if a1.flags.f_contiguous or trans == 2:
x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower,
trans=trans, unitdiag=unit_diagonal)
else:
# transposed system is solved since trtrs expects Fortran ordering
x, info = trtrs(a1.T, b1, overwrite_b=overwrite_b, lower=not lower,
trans=not trans, unitdiag=unit_diagonal)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix: resolution failed at diagonal %d" %
(info-1))
raise ValueError('illegal value in %dth argument of internal trtrs' %
(-info))
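# Illustrative sketch, not part of SciPy's public API: a hypothetical helper
# showing the ``trans`` keyword documented above; with ``trans='T'`` the
# transposed system a^T x = b is solved without forming ``a.T`` explicitly.
def _example_solve_triangular_trans():
    """Illustrative sketch only; solve a^T x = b for a lower triangular a."""
    a = np.array([[3., 0., 0.],
                  [2., 1., 0.],
                  [1., 1., 2.]])
    b = np.array([6., 4., 8.])
    x = solve_triangular(a, b, lower=True, trans='T')
    assert np.allclose(a.T @ x, b)
    return x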
def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False,
debug=None, check_finite=True):
"""
    Solve the equation a x = b for x, assuming a is a banded matrix.
The matrix a is stored in `ab` using the matrix diagonal ordered form::
ab[u + i - j, j] == a[i,j]
Example of `ab` (shape of a is (6,6), `u` =1, `l` =2)::
* a01 a12 a23 a34 a45
a00 a11 a22 a33 a44 a55
a10 a21 a32 a43 a54 *
a20 a31 a42 a53 * *
Parameters
----------
(l, u) : (integer, integer)
Number of non-zero lower and upper diagonals
ab : (`l` + `u` + 1, M) array_like
Banded matrix
b : (M,) or (M, K) array_like
Right-hand side
overwrite_ab : bool, optional
Discard data in `ab` (may enhance performance)
overwrite_b : bool, optional
Discard data in `b` (may enhance performance)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system a x = b. Returned shape depends on the
shape of `b`.
Examples
--------
Solve the banded system a x = b, where::
[5 2 -1 0 0] [0]
[1 4 2 -1 0] [1]
a = [0 1 3 2 -1] b = [2]
[0 0 1 2 2] [2]
[0 0 0 1 1] [3]
There is one nonzero diagonal below the main diagonal (l = 1), and
two above (u = 2). The diagonal banded form of the matrix is::
[* * -1 -1 -1]
ab = [* 2 2 2 2]
[5 4 3 2 1]
[1 1 1 1 *]
>>> from scipy.linalg import solve_banded
>>> ab = np.array([[0, 0, -1, -1, -1],
... [0, 2, 2, 2, 2],
... [5, 4, 3, 2, 1],
... [1, 1, 1, 1, 0]])
>>> b = np.array([0, 1, 2, 2, 3])
>>> x = solve_banded((1, 2), ab, b)
>>> x
array([-2.37288136, 3.93220339, -4. , 4.3559322 , -1.3559322 ])
"""
# Deprecate keyword "debug"
if debug is not None:
warn('Use of the "debug" keyword is deprecated '
'and this keyword will be removed in the future '
'versions of SciPy.', DeprecationWarning, stacklevel=2)
a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True)
b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True)
# Validate shapes.
if a1.shape[-1] != b1.shape[0]:
raise ValueError("shapes of ab and b are not compatible.")
(nlower, nupper) = l_and_u
if nlower + nupper + 1 != a1.shape[0]:
raise ValueError("invalid values for the number of lower and upper "
"diagonals: l+u+1 (%d) does not equal ab.shape[0] "
"(%d)" % (nlower + nupper + 1, ab.shape[0]))
overwrite_b = overwrite_b or _datacopied(b1, b)
if a1.shape[-1] == 1:
b2 = np.array(b1, copy=(not overwrite_b))
b2 /= a1[1, 0]
return b2
if nlower == nupper == 1:
overwrite_ab = overwrite_ab or _datacopied(a1, ab)
gtsv, = get_lapack_funcs(('gtsv',), (a1, b1))
du = a1[0, 1:]
d = a1[1, :]
dl = a1[2, :-1]
du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab,
overwrite_ab, overwrite_b)
else:
gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
a2 = np.zeros((2*nlower + nupper + 1, a1.shape[1]), dtype=gbsv.dtype)
a2[nlower:, :] = a1
lu, piv, x, info = gbsv(nlower, nupper, a2, b1, overwrite_ab=True,
overwrite_b=overwrite_b)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix")
raise ValueError('illegal value in %d-th argument of internal '
'gbsv/gtsv' % -info)
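# Illustrative sketch, not part of SciPy's public API: a hypothetical helper
# that builds the matrix diagonal ordered form documented above,
# ``ab[u + i - j, j] == a[i, j]``, from a dense tridiagonal matrix and then
# solves the banded system.
def _example_solve_banded_layout():
    """Illustrative sketch only; construct ``ab`` and solve a x = b."""
    a = np.array([[4., 1., 0., 0.],
                  [2., 5., 1., 0.],
                  [0., 2., 6., 1.],
                  [0., 0., 2., 7.]])
    nl, nu = 1, 1                    # one sub- and one superdiagonal
    n = a.shape[0]
    ab = np.zeros((nl + nu + 1, n))
    for i in range(n):
        for j in range(max(0, i - nl), min(n, i + nu + 1)):
            ab[nu + i - j, j] = a[i, j]
    b = np.array([1., 2., 3., 4.])
    x = solve_banded((nl, nu), ab, b)
    assert np.allclose(a @ x, b)
    return x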
def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False,
check_finite=True):
"""
    Solve the equation a x = b. a is a Hermitian positive-definite banded matrix.
The matrix a is stored in `ab` either in lower diagonal or upper
diagonal ordered form:
ab[u + i - j, j] == a[i,j] (if upper form; i <= j)
ab[ i - j, j] == a[i,j] (if lower form; i >= j)
Example of `ab` (shape of a is (6, 6), `u` =2)::
upper form:
* * a02 a13 a24 a35
* a01 a12 a23 a34 a45
a00 a11 a22 a33 a44 a55
lower form:
a00 a11 a22 a33 a44 a55
a10 a21 a32 a43 a54 *
a20 a31 a42 a53 * *
Cells marked with * are not used.
Parameters
----------
ab : (`u` + 1, M) array_like
Banded matrix
b : (M,) or (M, K) array_like
Right-hand side
overwrite_ab : bool, optional
Discard data in `ab` (may enhance performance)
overwrite_b : bool, optional
Discard data in `b` (may enhance performance)
lower : bool, optional
        Whether the matrix is in the lower form. (Default is upper form.)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system a x = b. Shape of return matches shape
of `b`.
Examples
--------
Solve the banded system A x = b, where::
[ 4 2 -1 0 0 0] [1]
[ 2 5 2 -1 0 0] [2]
A = [-1 2 6 2 -1 0] b = [2]
[ 0 -1 2 7 2 -1] [3]
[ 0 0 -1 2 8 2] [3]
[ 0 0 0 -1 2 9] [3]
>>> from scipy.linalg import solveh_banded
`ab` contains the main diagonal and the nonzero diagonals below the
main diagonal. That is, we use the lower form:
>>> ab = np.array([[ 4, 5, 6, 7, 8, 9],
... [ 2, 2, 2, 2, 2, 0],
... [-1, -1, -1, -1, 0, 0]])
>>> b = np.array([1, 2, 2, 3, 3, 3])
>>> x = solveh_banded(ab, b, lower=True)
>>> x
array([ 0.03431373, 0.45938375, 0.05602241, 0.47759104, 0.17577031,
0.34733894])
Solve the Hermitian banded system H x = b, where::
[ 8 2-1j 0 0 ] [ 1 ]
H = [2+1j 5 1j 0 ] b = [1+1j]
[ 0 -1j 9 -2-1j] [1-2j]
[ 0 0 -2+1j 6 ] [ 0 ]
In this example, we put the upper diagonals in the array `hb`:
>>> hb = np.array([[0, 2-1j, 1j, -2-1j],
... [8, 5, 9, 6 ]])
>>> b = np.array([1, 1+1j, 1-2j, 0])
>>> x = solveh_banded(hb, b)
>>> x
array([ 0.07318536-0.02939412j, 0.11877624+0.17696461j,
0.10077984-0.23035393j, -0.00479904-0.09358128j])
"""
a1 = _asarray_validated(ab, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
# Validate shapes.
if a1.shape[-1] != b1.shape[0]:
raise ValueError("shapes of ab and b are not compatible.")
overwrite_b = overwrite_b or _datacopied(b1, b)
overwrite_ab = overwrite_ab or _datacopied(a1, ab)
if a1.shape[0] == 2:
ptsv, = get_lapack_funcs(('ptsv',), (a1, b1))
if lower:
d = a1[0, :].real
e = a1[1, :-1]
else:
d = a1[1, :].real
e = a1[0, 1:].conj()
d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab,
overwrite_b)
else:
pbsv, = get_lapack_funcs(('pbsv',), (a1, b1))
c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab,
overwrite_b=overwrite_b)
if info > 0:
raise LinAlgError("%dth leading minor not positive definite" % info)
if info < 0:
raise ValueError('illegal value in %dth argument of internal '
'pbsv' % -info)
return x
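# Illustrative sketch, not part of SciPy's public API: a hypothetical helper
# that stores only the lower triangle of a symmetric positive definite banded
# matrix in the lower diagonal ordered form ``ab[i - j, j] == a[i, j]``
# (for i >= j) documented above and solves with ``lower=True``.
def _example_solveh_banded_lower_form():
    """Illustrative sketch only; build the lower form and solve a x = b."""
    a = np.array([[4., 1., 0., 0.],
                  [1., 5., 1., 0.],
                  [0., 1., 6., 1.],
                  [0., 0., 1., 7.]])
    n, bw = a.shape[0], 1            # bandwidth: one diagonal below the main
    ab = np.zeros((bw + 1, n))
    for j in range(n):
        for i in range(j, min(n, j + bw + 1)):
            ab[i - j, j] = a[i, j]
    b = np.array([1., 2., 3., 4.])
    x = solveh_banded(ab, b, lower=True)
    assert np.allclose(a @ x, b)
    return x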
def solve_toeplitz(c_or_cr, b, check_finite=True):
"""Solve a Toeplitz system using Levinson Recursion
The Toeplitz matrix has constant diagonals, with c as its first column
and r as its first row. If r is not given, ``r == conjugate(c)`` is
assumed.
Parameters
----------
c_or_cr : array_like or tuple of (array_like, array_like)
The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
actual shape of ``c``, it will be converted to a 1-D array. If not
supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
b : (M,) or (M, K) array_like
Right-hand side in ``T x = b``.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(result entirely NaNs) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system ``T x = b``. Shape of return matches shape
of `b`.
See Also
--------
toeplitz : Toeplitz matrix
Notes
-----
The solution is computed using Levinson-Durbin recursion, which is faster
than generic least-squares methods, but can be less numerically stable.
Examples
--------
Solve the Toeplitz system T x = b, where::
[ 1 -1 -2 -3] [1]
T = [ 3 1 -1 -2] b = [2]
[ 6 3 1 -1] [2]
[10 6 3 1] [5]
To specify the Toeplitz matrix, only the first column and the first
row are needed.
>>> c = np.array([1, 3, 6, 10]) # First column of T
>>> r = np.array([1, -1, -2, -3]) # First row of T
>>> b = np.array([1, 2, 2, 5])
>>> from scipy.linalg import solve_toeplitz, toeplitz
>>> x = solve_toeplitz((c, r), b)
>>> x
array([ 1.66666667, -1. , -2.66666667, 2.33333333])
Check the result by creating the full Toeplitz matrix and
multiplying it by `x`. We should get `b`.
>>> T = toeplitz(c, r)
>>> T.dot(x)
array([ 1., 2., 2., 5.])
"""
# If numerical stability of this algorithm is a problem, a future
# developer might consider implementing other O(N^2) Toeplitz solvers,
# such as GKO (https://www.jstor.org/stable/2153371) or Bareiss.
r, c, b, dtype, b_shape = _validate_args_for_toeplitz_ops(
c_or_cr, b, check_finite, keep_b_shape=True)
# Form a 1-D array of values to be used in the matrix, containing a
# reversed copy of r[1:], followed by c.
vals = np.concatenate((r[-1:0:-1], c))
if b is None:
raise ValueError('illegal value, `b` is a required argument')
if b.ndim == 1:
x, _ = levinson(vals, np.ascontiguousarray(b))
else:
x = np.column_stack([levinson(vals, np.ascontiguousarray(b[:, i]))[0]
for i in range(b.shape[1])])
x = x.reshape(*b_shape)
return x
def _get_axis_len(aname, a, axis):
ax = axis
if ax < 0:
ax += a.ndim
if 0 <= ax < a.ndim:
return a.shape[ax]
raise ValueError("'%saxis' entry is out of bounds" % (aname,))
def solve_circulant(c, b, singular='raise', tol=None,
caxis=-1, baxis=0, outaxis=0):
"""Solve C x = b for x, where C is a circulant matrix.
`C` is the circulant matrix associated with the vector `c`.
The system is solved by doing division in Fourier space. The
calculation is::
x = ifft(fft(b) / fft(c))
where `fft` and `ifft` are the fast Fourier transform and its inverse,
respectively. For a large vector `c`, this is *much* faster than
solving the system with the full circulant matrix.
Parameters
----------
c : array_like
The coefficients of the circulant matrix.
b : array_like
Right-hand side matrix in ``a x = b``.
singular : str, optional
This argument controls how a near singular circulant matrix is
handled. If `singular` is "raise" and the circulant matrix is
near singular, a `LinAlgError` is raised. If `singular` is
"lstsq", the least squares solution is returned. Default is "raise".
tol : float, optional
If any eigenvalue of the circulant matrix has an absolute value
that is less than or equal to `tol`, the matrix is considered to be
near singular. If not given, `tol` is set to::
tol = abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps
where `abs_eigs` is the array of absolute values of the eigenvalues
of the circulant matrix.
caxis : int
When `c` has dimension greater than 1, it is viewed as a collection
of circulant vectors. In this case, `caxis` is the axis of `c` that
holds the vectors of circulant coefficients.
baxis : int
When `b` has dimension greater than 1, it is viewed as a collection
of vectors. In this case, `baxis` is the axis of `b` that holds the
right-hand side vectors.
outaxis : int
When `c` or `b` are multidimensional, the value returned by
`solve_circulant` is multidimensional. In this case, `outaxis` is
the axis of the result that holds the solution vectors.
Returns
-------
x : ndarray
Solution to the system ``C x = b``.
Raises
------
LinAlgError
If the circulant matrix associated with `c` is near singular.
See Also
--------
circulant : circulant matrix
Notes
-----
For a 1-D vector `c` with length `m`, and an array `b`
with shape ``(m, ...)``,
solve_circulant(c, b)
returns the same result as
solve(circulant(c), b)
where `solve` and `circulant` are from `scipy.linalg`.
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.linalg import solve_circulant, solve, circulant, lstsq
>>> c = np.array([2, 2, 4])
>>> b = np.array([1, 2, 3])
>>> solve_circulant(c, b)
array([ 0.75, -0.25, 0.25])
Compare that result to solving the system with `scipy.linalg.solve`:
>>> solve(circulant(c), b)
array([ 0.75, -0.25, 0.25])
A singular example:
>>> c = np.array([1, 1, 0, 0])
>>> b = np.array([1, 2, 3, 4])
Calling ``solve_circulant(c, b)`` will raise a `LinAlgError`. For the
least square solution, use the option ``singular='lstsq'``:
>>> solve_circulant(c, b, singular='lstsq')
array([ 0.25, 1.25, 2.25, 1.25])
Compare to `scipy.linalg.lstsq`:
>>> x, resid, rnk, s = lstsq(circulant(c), b)
>>> x
array([ 0.25, 1.25, 2.25, 1.25])
A broadcasting example:
Suppose we have the vectors of two circulant matrices stored in an array
with shape (2, 5), and three `b` vectors stored in an array with shape
(3, 5). For example,
>>> c = np.array([[1.5, 2, 3, 0, 0], [1, 1, 4, 3, 2]])
>>> b = np.arange(15).reshape(-1, 5)
We want to solve all combinations of circulant matrices and `b` vectors,
with the result stored in an array with shape (2, 3, 5). When we
disregard the axes of `c` and `b` that hold the vectors of coefficients,
the shapes of the collections are (2,) and (3,), respectively, which are
not compatible for broadcasting. To have a broadcast result with shape
(2, 3), we add a trivial dimension to `c`: ``c[:, np.newaxis, :]`` has
shape (2, 1, 5). The last dimension holds the coefficients of the
circulant matrices, so when we call `solve_circulant`, we can use the
default ``caxis=-1``. The coefficients of the `b` vectors are in the last
dimension of the array `b`, so we use ``baxis=-1``. If we use the
default `outaxis`, the result will have shape (5, 2, 3), so we'll use
``outaxis=-1`` to put the solution vectors in the last dimension.
>>> x = solve_circulant(c[:, np.newaxis, :], b, baxis=-1, outaxis=-1)
>>> x.shape
(2, 3, 5)
>>> np.set_printoptions(precision=3) # For compact output of numbers.
>>> x
array([[[-0.118, 0.22 , 1.277, -0.142, 0.302],
[ 0.651, 0.989, 2.046, 0.627, 1.072],
[ 1.42 , 1.758, 2.816, 1.396, 1.841]],
[[ 0.401, 0.304, 0.694, -0.867, 0.377],
[ 0.856, 0.758, 1.149, -0.412, 0.831],
[ 1.31 , 1.213, 1.603, 0.042, 1.286]]])
Check by solving one pair of `c` and `b` vectors (cf. ``x[1, 1, :]``):
>>> solve_circulant(c[1], b[1, :])
array([ 0.856, 0.758, 1.149, -0.412, 0.831])
"""
c = np.atleast_1d(c)
nc = _get_axis_len("c", c, caxis)
b = np.atleast_1d(b)
nb = _get_axis_len("b", b, baxis)
if nc != nb:
raise ValueError('Shapes of c {} and b {} are incompatible'
.format(c.shape, b.shape))
fc = np.fft.fft(np.rollaxis(c, caxis, c.ndim), axis=-1)
abs_fc = np.abs(fc)
if tol is None:
# This is the same tolerance as used in np.linalg.matrix_rank.
tol = abs_fc.max(axis=-1) * nc * np.finfo(np.float64).eps
if tol.shape != ():
tol.shape = tol.shape + (1,)
else:
tol = np.atleast_1d(tol)
near_zeros = abs_fc <= tol
is_near_singular = np.any(near_zeros)
if is_near_singular:
if singular == 'raise':
raise LinAlgError("near singular circulant matrix.")
else:
# Replace the small values with 1 to avoid errors in the
# division fb/fc below.
fc[near_zeros] = 1
fb = np.fft.fft(np.rollaxis(b, baxis, b.ndim), axis=-1)
q = fb / fc
if is_near_singular:
# `near_zeros` is a boolean array, same shape as `c`, that is
# True where `fc` is (near) zero. `q` is the broadcasted result
# of fb / fc, so to set the values of `q` to 0 where `fc` is near
# zero, we use a mask that is the broadcast result of an array
# of True values shaped like `b` with `near_zeros`.
mask = np.ones_like(b, dtype=bool) & near_zeros
q[mask] = 0
x = np.fft.ifft(q, axis=-1)
if not (np.iscomplexobj(c) or np.iscomplexobj(b)):
x = x.real
if outaxis != -1:
x = np.rollaxis(x, -1, outaxis)
return x
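# Illustrative sketch, not part of SciPy's public API: a hypothetical helper
# reproducing the Fourier-space division quoted in the docstring,
# x = ifft(fft(b) / fft(c)), for a well-conditioned 1-D circulant system.
def _example_solve_circulant_fft():
    """Illustrative sketch only; compare against the direct FFT formula."""
    c = np.array([5., 1., 0., 0., 1.])   # eigenvalues 5 + 2*cos(2*pi*k/5) > 0
    b = np.array([1., 2., 3., 4., 5.])
    x_direct = np.fft.ifft(np.fft.fft(b) / np.fft.fft(c)).real
    x_solver = solve_circulant(c, b)
    assert np.allclose(x_direct, x_solver)
    return x_solver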
# matrix inversion
def inv(a, overwrite_a=False, check_finite=True):
"""
Compute the inverse of a matrix.
Parameters
----------
a : array_like
Square matrix to be inverted.
overwrite_a : bool, optional
Discard data in `a` (may improve performance). Default is False.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
ainv : ndarray
Inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular.
ValueError
If `a` is not square, or not 2D.
Examples
--------
>>> from scipy import linalg
>>> a = np.array([[1., 2.], [3., 4.]])
>>> linalg.inv(a)
array([[-2. , 1. ],
[ 1.5, -0.5]])
>>> np.dot(a, linalg.inv(a))
array([[ 1., 0.],
[ 0., 1.]])
"""
a1 = _asarray_validated(a, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
overwrite_a = overwrite_a or _datacopied(a1, a)
# XXX: I found no advantage or disadvantage of using finv.
# finv, = get_flinalg_funcs(('inv',),(a1,))
# if finv is not None:
# a_inv,info = finv(a1,overwrite_a=overwrite_a)
# if info==0:
# return a_inv
# if info>0: raise LinAlgError, "singular matrix"
# if info<0: raise ValueError('illegal value in %d-th argument of '
# 'internal inv.getrf|getri'%(-info))
getrf, getri, getri_lwork = get_lapack_funcs(('getrf', 'getri',
'getri_lwork'),
(a1,))
lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
if info == 0:
lwork = _compute_lwork(getri_lwork, a1.shape[0])
# XXX: the following line fixes curious SEGFAULT when
# benchmarking 500x500 matrix inverse. This seems to
# be a bug in LAPACK ?getri routine because if lwork is
# minimal (when using lwork[0] instead of lwork[1]) then
# all tests pass. Further investigation is required if
# more such SEGFAULTs occur.
lwork = int(1.01 * lwork)
inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
if info > 0:
raise LinAlgError("singular matrix")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal '
'getrf|getri' % -info)
return inv_a
# Determinant
def det(a, overwrite_a=False, check_finite=True):
"""
Compute the determinant of a matrix
The determinant of a square matrix is a value derived arithmetically
from the coefficients of the matrix.
The determinant for a 3x3 matrix, for example, is computed as follows::
a b c
d e f = A
g h i
det(A) = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h
Parameters
----------
a : (M, M) array_like
A square matrix.
overwrite_a : bool, optional
Allow overwriting data in a (may enhance performance).
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
det : float or complex
Determinant of `a`.
Notes
-----
The determinant is computed via LU factorization, LAPACK routine z/dgetrf.
Examples
--------
>>> from scipy import linalg
>>> a = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> linalg.det(a)
0.0
>>> a = np.array([[0,2,3], [4,5,6], [7,8,9]])
>>> linalg.det(a)
3.0
"""
a1 = _asarray_validated(a, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
overwrite_a = overwrite_a or _datacopied(a1, a)
fdet, = get_flinalg_funcs(('det',), (a1,))
a_det, info = fdet(a1, overwrite_a=overwrite_a)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal '
'det.getrf' % -info)
return a_det
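# Illustrative sketch, not part of SciPy's public API: a hypothetical helper
# checking the 3x3 expansion quoted in the docstring against the LU-based
# determinant computed by ``det``.
def _example_det_3x3_expansion():
    """Illustrative sketch only; expand a 3x3 determinant by hand."""
    m = np.array([[1., 2., 3.],
                  [0., 4., 5.],
                  [1., 0., 6.]])
    a, b, c = m[0]
    d, e, f = m[1]
    g, h, i = m[2]
    expanded = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h
    assert np.allclose(expanded, det(m))
    return expanded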
# Linear Least Squares
def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
check_finite=True, lapack_driver=None):
"""
Compute least-squares solution to equation Ax = b.
Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.
Parameters
----------
a : (M, N) array_like
Left-hand side array
b : (M,) or (M, K) array_like
Right hand side array
cond : float, optional
        Cutoff for 'small' singular values; used to determine effective
        rank of a. Singular values smaller than
        ``cond * largest_singular_value`` are considered zero.
overwrite_a : bool, optional
Discard data in `a` (may enhance performance). Default is False.
overwrite_b : bool, optional
Discard data in `b` (may enhance performance). Default is False.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
lapack_driver : str, optional
Which LAPACK driver is used to solve the least-squares problem.
Options are ``'gelsd'``, ``'gelsy'``, ``'gelss'``. Default
(``'gelsd'``) is a good choice. However, ``'gelsy'`` can be slightly
faster on many problems. ``'gelss'`` was used historically. It is
generally slow but uses less memory.
.. versionadded:: 0.17.0
Returns
-------
x : (N,) or (N, K) ndarray
Least-squares solution. Return shape matches shape of `b`.
residues : (K,) ndarray or float
        Square of the 2-norm for each column in ``b - a x``, if ``M > N`` and
        ``rank(A) == n`` (returns a scalar if b is 1-D). Otherwise a
(0,)-shaped array is returned.
rank : int
Effective rank of `a`.
s : (min(M, N),) ndarray or None
Singular values of `a`. The condition number of a is
``abs(s[0] / s[-1])``.
Raises
------
LinAlgError
If computation does not converge.
ValueError
When parameters are not compatible.
See Also
--------
scipy.optimize.nnls : linear least squares with non-negativity constraint
Notes
-----
When ``'gelsy'`` is used as a driver, `residues` is set to a (0,)-shaped
array and `s` is always ``None``.
Examples
--------
>>> from scipy.linalg import lstsq
>>> import matplotlib.pyplot as plt
Suppose we have the following data:
>>> x = np.array([1, 2.5, 3.5, 4, 5, 7, 8.5])
>>> y = np.array([0.3, 1.1, 1.5, 2.0, 3.2, 6.6, 8.6])
We want to fit a quadratic polynomial of the form ``y = a + b*x**2``
to this data. We first form the "design matrix" M, with a constant
column of 1s and a column containing ``x**2``:
>>> M = x[:, np.newaxis]**[0, 2]
>>> M
array([[ 1. , 1. ],
[ 1. , 6.25],
[ 1. , 12.25],
[ 1. , 16. ],
[ 1. , 25. ],
[ 1. , 49. ],
[ 1. , 72.25]])
We want to find the least-squares solution to ``M.dot(p) = y``,
where ``p`` is a vector with length 2 that holds the parameters
``a`` and ``b``.
>>> p, res, rnk, s = lstsq(M, y)
>>> p
array([ 0.20925829, 0.12013861])
Plot the data and the fitted curve.
>>> plt.plot(x, y, 'o', label='data')
>>> xx = np.linspace(0, 9, 101)
>>> yy = p[0] + p[1]*xx**2
>>> plt.plot(xx, yy, label='least squares fit, $y = a + bx^2$')
>>> plt.xlabel('x')
>>> plt.ylabel('y')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.show()
"""
a1 = _asarray_validated(a, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
if len(a1.shape) != 2:
raise ValueError('Input array a should be 2D')
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
if m != b1.shape[0]:
raise ValueError('Shape mismatch: a and b should have the same number'
' of rows ({} != {}).'.format(m, b1.shape[0]))
if m == 0 or n == 0: # Zero-sized problem, confuses LAPACK
x = np.zeros((n,) + b1.shape[1:], dtype=np.common_type(a1, b1))
if n == 0:
residues = np.linalg.norm(b1, axis=0)**2
else:
residues = np.empty((0,))
return x, residues, 0, np.empty((0,))
driver = lapack_driver
if driver is None:
driver = lstsq.default_lapack_driver
if driver not in ('gelsd', 'gelsy', 'gelss'):
raise ValueError('LAPACK driver "%s" is not found' % driver)
lapack_func, lapack_lwork = get_lapack_funcs((driver,
'%s_lwork' % driver),
(a1, b1))
real_data = True if (lapack_func.dtype.kind == 'f') else False
if m < n:
# need to extend b matrix as it will be filled with
# a larger solution matrix
if len(b1.shape) == 2:
b2 = np.zeros((n, nrhs), dtype=lapack_func.dtype)
b2[:m, :] = b1
else:
b2 = np.zeros(n, dtype=lapack_func.dtype)
b2[:m] = b1
b1 = b2
overwrite_a = overwrite_a or _datacopied(a1, a)
overwrite_b = overwrite_b or _datacopied(b1, b)
if cond is None:
cond = np.finfo(lapack_func.dtype).eps
if driver in ('gelss', 'gelsd'):
if driver == 'gelss':
lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
v, x, s, rank, work, info = lapack_func(a1, b1, cond, lwork,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
elif driver == 'gelsd':
if real_data:
lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
x, s, rank, info = lapack_func(a1, b1, lwork,
iwork, cond, False, False)
else: # complex data
lwork, rwork, iwork = _compute_lwork(lapack_lwork, m, n,
nrhs, cond)
x, s, rank, info = lapack_func(a1, b1, lwork, rwork, iwork,
cond, False, False)
if info > 0:
raise LinAlgError("SVD did not converge in Linear Least Squares")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal %s'
% (-info, lapack_driver))
resids = np.asarray([], dtype=x.dtype)
if m > n:
x1 = x[:n]
if rank == n:
resids = np.sum(np.abs(x[n:])**2, axis=0)
x = x1
return x, resids, rank, s
elif driver == 'gelsy':
lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = lapack_func(a1, b1, jptv, cond,
lwork, False, False)
if info < 0:
raise ValueError("illegal value in %d-th argument of internal "
"gelsy" % -info)
if m > n:
x1 = x[:n]
x = x1
return x, np.array([], x.dtype), rank, None
lstsq.default_lapack_driver = 'gelsd'
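# Illustrative sketch, not part of SciPy's public API: a hypothetical helper
# showing that the three ``lapack_driver`` choices return the same solution
# for a full-rank overdetermined system; only the reported residues and
# singular values differ in form (see the Notes section above).
def _example_lstsq_drivers_agree():
    """Illustrative sketch only; fit a line with each LAPACK driver."""
    a = np.array([[1., 1.],
                  [1., 2.],
                  [1., 3.],
                  [1., 4.]])
    b = np.array([1., 2., 2., 4.])
    x_gelsd = lstsq(a, b, lapack_driver='gelsd')[0]
    x_gelsy = lstsq(a, b, lapack_driver='gelsy')[0]
    x_gelss = lstsq(a, b, lapack_driver='gelss')[0]
    assert np.allclose(x_gelsd, x_gelsy)
    assert np.allclose(x_gelsd, x_gelss)
    return x_gelsd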
def pinv(a, atol=None, rtol=None, return_rank=False, check_finite=True,
cond=None, rcond=None):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate a generalized inverse of a matrix using its
singular-value decomposition ``U @ S @ V`` in the economy mode and picking
up only the columns/rows that are associated with significant singular
values.
If ``s`` is the maximum singular value of ``a``, then the
significance cut-off value is determined by ``atol + rtol * s``. Any
singular value below this value is assumed insignificant.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
    atol : float, optional
Absolute threshold term, default value is 0.
.. versionadded:: 1.7.0
    rtol : float, optional
Relative threshold term, default value is ``max(M, N) * eps`` where
``eps`` is the machine precision value of the datatype of ``a``.
.. versionadded:: 1.7.0
return_rank : bool, optional
If True, return the effective rank of the matrix.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
cond, rcond : float, optional
In older versions, these values were meant to be used as ``atol`` with
        ``rtol=0``. If both were given, ``rcond`` overwrote ``cond`` and hence
        the code was not correct. Thus using these is strongly discouraged and
        the tolerances above are recommended instead. In fact, if provided,
        ``atol`` and ``rtol`` take precedence over these keywords.
.. versionchanged:: 1.7.0
Deprecated in favor of ``rtol`` and ``atol`` parameters above and
will be removed in future versions of SciPy.
.. versionchanged:: 1.3.0
Previously the default cutoff value was just ``eps*f`` where ``f``
was ``1e3`` for single precision and ``1e6`` for double precision.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if `return_rank` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Examples
--------
>>> from scipy import linalg
>>> rng = np.random.default_rng()
>>> a = rng.standard_normal((9, 6))
>>> B = linalg.pinv(a)
>>> np.allclose(a, a @ B @ a)
True
>>> np.allclose(B, B @ a @ B)
True
"""
a = _asarray_validated(a, check_finite=check_finite)
u, s, vh = decomp_svd.svd(a, full_matrices=False, check_finite=False)
t = u.dtype.char.lower()
maxS = np.max(s)
if rcond or cond:
warn('Use of the "cond" and "rcond" keywords are deprecated and '
'will be removed in future versions of SciPy. Use "atol" and '
'"rtol" keywords instead', DeprecationWarning, stacklevel=2)
    # backwards compatible only if atol and rtol are both missing
if (rcond or cond) and (atol is None) and (rtol is None):
atol = rcond or cond
rtol = 0.
atol = 0. if atol is None else atol
rtol = max(a.shape) * np.finfo(t).eps if (rtol is None) else rtol
if (atol < 0.) or (rtol < 0.):
raise ValueError("atol and rtol values must be positive.")
val = atol + maxS * rtol
rank = np.sum(s > val)
u = u[:, :rank]
u /= s[:rank]
B = (u @ vh[:rank]).conj().T
if return_rank:
return B, rank
else:
return B
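# Illustrative sketch, not part of SciPy's public API: a hypothetical helper
# showing that the effective rank reported by ``pinv`` is the number of
# singular values above the cut-off ``atol + rtol * max(s)`` described above.
def _example_pinv_cutoff_rank():
    """Illustrative sketch only; count singular values above the cut-off."""
    rng = np.random.default_rng(0)
    a = rng.standard_normal((6, 4))
    a[:, 3] = a[:, 0] + a[:, 1]          # force a rank-deficient column
    atol, rtol = 1e-8, 1e-8
    _, rank = pinv(a, atol=atol, rtol=rtol, return_rank=True)
    s = decomp_svd.svd(a, compute_uv=False)
    n_above = int(np.sum(s > atol + rtol * s.max()))
    assert rank == n_above == 3
    return rank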
def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
`scipy.linalg.pinv2` is deprecated since SciPy 1.7.0, use
`scipy.linalg.pinv` instead for better tolerance control.
Calculate a generalized inverse of a matrix using its
singular-value decomposition and including all 'large' singular
values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
cond, rcond : float or None
Cutoff for 'small' singular values; singular values smaller than this
value are considered as zero. If both are omitted, the default value
``max(M,N)*largest_singular_value*eps`` is used where ``eps`` is the
machine precision value of the datatype of ``a``.
.. versionchanged:: 1.3.0
Previously the default cutoff value was just ``eps*f`` where ``f``
was ``1e3`` for single precision and ``1e6`` for double precision.
return_rank : bool, optional
If True, return the effective rank of the matrix.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if `return_rank` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
"""
# SciPy 1.7.0 2021-04-10
warn('scipy.linalg.pinv2 is deprecated since SciPy 1.7.0, use '
'scipy.linalg.pinv instead', DeprecationWarning, stacklevel=2)
if rcond is not None:
cond = rcond
return pinv(a=a, atol=cond, rtol=None, return_rank=return_rank,
check_finite=check_finite)
def pinvh(a, atol=None, rtol=None, lower=True, return_rank=False,
check_finite=True, cond=None, rcond=None):
"""
Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.
    Calculate a generalized inverse of a complex Hermitian/real symmetric
matrix using its eigenvalue decomposition and including all eigenvalues
with 'large' absolute value.
Parameters
----------
a : (N, N) array_like
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
    atol : float, optional
Absolute threshold term, default value is 0.
.. versionadded:: 1.7.0
    rtol : float, optional
Relative threshold term, default value is ``N * eps`` where
``eps`` is the machine precision value of the datatype of ``a``.
.. versionadded:: 1.7.0
lower : bool, optional
Whether the pertinent array data is taken from the lower or upper
triangle of `a`. (Default: lower)
return_rank : bool, optional
If True, return the effective rank of the matrix.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
cond, rcond : float, optional
In older versions, these values were meant to be used as ``atol`` with
        ``rtol=0``. If both were given, ``rcond`` overwrote ``cond`` and hence
        the code was not correct. Thus using these is strongly discouraged and
        the tolerances above are recommended instead. In fact, if provided,
        ``atol`` and ``rtol`` take precedence over these keywords.
.. versionchanged:: 1.7.0
Deprecated in favor of ``rtol`` and ``atol`` parameters above and
will be removed in future versions of SciPy.
.. versionchanged:: 1.3.0
Previously the default cutoff value was just ``eps*f`` where ``f``
was ``1e3`` for single precision and ``1e6`` for double precision.
Returns
-------
B : (N, N) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if `return_rank` is True.
Raises
------
LinAlgError
If eigenvalue algorithm does not converge.
Examples
--------
>>> from scipy.linalg import pinvh
>>> rng = np.random.default_rng()
>>> a = rng.standard_normal((9, 6))
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, a @ B @ a)
True
>>> np.allclose(B, B @ a @ B)
True
"""
a = _asarray_validated(a, check_finite=check_finite)
s, u = decomp.eigh(a, lower=lower, check_finite=False)
t = u.dtype.char.lower()
maxS = np.max(np.abs(s))
if rcond or cond:
warn('Use of the "cond" and "rcond" keywords are deprecated and '
'will be removed in future versions of SciPy. Use "atol" and '
'"rtol" keywords instead', DeprecationWarning, stacklevel=2)
    # backwards compatible only if atol and rtol are both missing
if (rcond or cond) and (atol is None) and (rtol is None):
atol = rcond or cond
rtol = 0.
atol = 0. if atol is None else atol
rtol = max(a.shape) * np.finfo(t).eps if (rtol is None) else rtol
if (atol < 0.) or (rtol < 0.):
raise ValueError("atol and rtol values must be positive.")
val = atol + maxS * rtol
above_cutoff = (abs(s) > val)
psigma_diag = 1.0 / s[above_cutoff]
u = u[:, above_cutoff]
B = (u * psigma_diag) @ u.conj().T
if return_rank:
return B, len(psigma_diag)
else:
return B
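# Illustrative sketch, not part of SciPy's public API: a hypothetical helper
# rebuilding the pseudo-inverse of a singular symmetric matrix from its
# eigendecomposition, keeping only eigenvalues with 'large' absolute value as
# described above, and comparing the result with ``pinvh``.
def _example_pinvh_from_eigh():
    """Illustrative sketch only; reconstruct pinvh from eigh."""
    rng = np.random.default_rng(1)
    m = rng.standard_normal((5, 3))
    a = m @ m.T                          # symmetric, rank at most 3
    w, v = decomp.eigh(a)
    keep = np.abs(w) > 1e-10 * np.abs(w).max()
    b_manual = (v[:, keep] / w[keep]) @ v[:, keep].conj().T
    assert np.allclose(b_manual, pinvh(a))
    return b_manual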
def matrix_balance(A, permute=True, scale=True, separate=False,
overwrite_a=False):
"""
Compute a diagonal similarity transformation for row/column balancing.
The balancing tries to equalize the row and column 1-norms by applying
a similarity transformation such that the magnitude variation of the
    matrix entries is reflected in the scaling matrices.
Moreover, if enabled, the matrix is first permuted to isolate the upper
triangular parts of the matrix and, again if scaling is also enabled,
only the remaining subblocks are subjected to scaling.
The balanced matrix satisfies the following equality
.. math::
B = T^{-1} A T
The scaling coefficients are approximated to the nearest power of 2
to avoid round-off errors.
Parameters
----------
A : (n, n) array_like
Square data matrix for the balancing.
permute : bool, optional
The selector to define whether permutation of A is also performed
prior to scaling.
scale : bool, optional
The selector to turn on and off the scaling. If False, the matrix
will not be scaled.
separate : bool, optional
This switches from returning a full matrix of the transformation
to a tuple of two separate 1-D permutation and scaling arrays.
overwrite_a : bool, optional
        This is passed to xGEBAL directly. Essentially, it overwrites the
        input data with the result. It might increase the space efficiency.
        See the LAPACK manual for details. This is False by default.
Returns
-------
B : (n, n) ndarray
Balanced matrix
T : (n, n) ndarray
A possibly permuted diagonal matrix whose nonzero entries are
integer powers of 2 to avoid numerical truncation errors.
scale, perm : (n,) ndarray
If ``separate`` keyword is set to True then instead of the array
``T`` above, the scaling and the permutation vectors are given
separately as a tuple without allocating the full array ``T``.
Notes
-----
This algorithm is particularly useful for eigenvalue and matrix
decompositions and in many cases it is already called by various
LAPACK routines.
The algorithm is based on the well-known technique of [1]_ and has
been modified to account for special cases. See [2]_ for details
which have been implemented since LAPACK v3.5.0. Before this version
there are corner cases where balancing can actually worsen the
conditioning. See [3]_ for such examples.
The code is a wrapper around LAPACK's xGEBAL routine family for matrix
balancing.
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy import linalg
>>> x = np.array([[1,2,0], [9,1,0.01], [1,2,10*np.pi]])
>>> y, permscale = linalg.matrix_balance(x)
>>> np.abs(x).sum(axis=0) / np.abs(x).sum(axis=1)
array([ 3.66666667, 0.4995005 , 0.91312162])
>>> np.abs(y).sum(axis=0) / np.abs(y).sum(axis=1)
array([ 1.2 , 1.27041742, 0.92658316]) # may vary
>>> permscale # only powers of 2 (0.5 == 2^(-1))
array([[ 0.5, 0. , 0. ], # may vary
[ 0. , 1. , 0. ],
[ 0. , 0. , 1. ]])
References
----------
.. [1] : B.N. Parlett and C. Reinsch, "Balancing a Matrix for
Calculation of Eigenvalues and Eigenvectors", Numerische Mathematik,
Vol.13(4), 1969, :doi:`10.1007/BF02165404`
.. [2] : R. James, J. Langou, B.R. Lowery, "On matrix balancing and
eigenvector computation", 2014, :arxiv:`1401.5766`
.. [3] : D.S. Watkins. A case where balancing is harmful.
Electron. Trans. Numer. Anal, Vol.23, 2006.
"""
A = np.atleast_2d(_asarray_validated(A, check_finite=True))
if not np.equal(*A.shape):
raise ValueError('The data matrix for balancing should be square.')
gebal = get_lapack_funcs(('gebal'), (A,))
B, lo, hi, ps, info = gebal(A, scale=scale, permute=permute,
overwrite_a=overwrite_a)
if info < 0:
raise ValueError('xGEBAL exited with the internal error '
'"illegal value in argument number {}.". See '
'LAPACK documentation for the xGEBAL error codes.'
''.format(-info))
# Separate the permutations from the scalings and then convert to int
scaling = np.ones_like(ps, dtype=float)
scaling[lo:hi+1] = ps[lo:hi+1]
# gebal uses 1-indexing
ps = ps.astype(int, copy=False) - 1
n = A.shape[0]
perm = np.arange(n)
# LAPACK permutes with the ordering n --> hi, then 0--> lo
if hi < n:
for ind, x in enumerate(ps[hi+1:][::-1], 1):
if n-ind == x:
continue
perm[[x, n-ind]] = perm[[n-ind, x]]
if lo > 0:
for ind, x in enumerate(ps[:lo]):
if ind == x:
continue
perm[[x, ind]] = perm[[ind, x]]
if separate:
return B, (scaling, perm)
# get the inverse permutation
iperm = np.empty_like(perm)
iperm[perm] = np.arange(n)
return B, np.diag(scaling)[iperm, :]
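# Illustrative sketch, not part of SciPy's public API: a hypothetical helper
# checking the similarity relation quoted in the docstring, B = inv(T) A T,
# and that the scaling factors returned with ``separate=True`` are integer
# powers of two.
def _example_matrix_balance_similarity():
    """Illustrative sketch only; verify B = inv(T) @ A @ T."""
    a = np.array([[1., 2., 0.],
                  [9., 1., 0.01],
                  [1., 2., 10.]])
    balanced, t = matrix_balance(a)
    assert np.allclose(balanced, inv(t) @ a @ t)
    _, (scaling, _perm) = matrix_balance(a, separate=True)
    assert np.allclose(np.log2(scaling) % 1, 0)  # all entries are 2**k
    return balanced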
def _validate_args_for_toeplitz_ops(c_or_cr, b, check_finite, keep_b_shape,
enforce_square=True):
"""Validate arguments and format inputs for toeplitz functions
Parameters
----------
c_or_cr : array_like or tuple of (array_like, array_like)
The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
actual shape of ``c``, it will be converted to a 1-D array. If not
supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
b : (M,) or (M, K) array_like
Right-hand side in ``T x = b``.
check_finite : bool
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(result entirely NaNs) if the inputs do contain infinities or NaNs.
keep_b_shape: bool
Whether to convert a (M,) dimensional b into a (M, 1) dimensional
matrix.
enforce_square: bool, optional
If True (default), this verifies that the Toeplitz matrix is square.
Returns
-------
r : array
1d array corresponding to the first row of the Toeplitz matrix.
c: array
1d array corresponding to the first column of the Toeplitz matrix.
b: array
(M,), (M, 1) or (M, K) dimensional array, post validation,
corresponding to ``b``.
dtype: numpy datatype
``dtype`` stores the datatype of ``r``, ``c`` and ``b``. If any of
``r``, ``c`` or ``b`` are complex, ``dtype`` is ``np.complex128``,
otherwise, it is ``np.float64``.
b_shape: tuple
Shape of ``b`` after passing it through ``_asarray_validated``.
"""
if isinstance(c_or_cr, tuple):
c, r = c_or_cr
c = _asarray_validated(c, check_finite=check_finite).ravel()
r = _asarray_validated(r, check_finite=check_finite).ravel()
else:
c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
r = c.conjugate()
if b is None:
raise ValueError('`b` must be an array, not None.')
b = _asarray_validated(b, check_finite=check_finite)
b_shape = b.shape
is_not_square = r.shape[0] != c.shape[0]
if (enforce_square and is_not_square) or b.shape[0] != r.shape[0]:
raise ValueError('Incompatible dimensions.')
is_cmplx = np.iscomplexobj(r) or np.iscomplexobj(c) or np.iscomplexobj(b)
dtype = np.complex128 if is_cmplx else np.double
r, c, b = (np.asarray(i, dtype=dtype) for i in (r, c, b))
if b.ndim == 1 and not keep_b_shape:
b = b.reshape(-1, 1)
elif b.ndim != 1:
b = b.reshape(b.shape[0], -1)
return r, c, b, dtype, b_shape
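# Hedged sketch (illustrative only, not part of SciPy): what the validator
# above returns for a small well-formed input -- c and r flattened to 1-D,
# b promoted to a column when keep_b_shape is False, and a common dtype.
def _toeplitz_validation_demo():
    import numpy as np
    c = np.array([1, 3, 6, 10])
    r = np.array([1, -1, -2, -3])
    b = np.ones(4)
    r1, c1, b1, dtype, b_shape = _validate_args_for_toeplitz_ops(
        (c, r), b, check_finite=True, keep_b_shape=False)
    return b1.shape, dtype, b_shape  # (4, 1), float64, original shape (4,)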
def matmul_toeplitz(c_or_cr, x, check_finite=False, workers=None):
"""Efficient Toeplitz Matrix-Matrix Multiplication using FFT
This function returns the matrix multiplication between a Toeplitz
matrix and a dense matrix.
The Toeplitz matrix has constant diagonals, with c as its first column
and r as its first row. If r is not given, ``r == conjugate(c)`` is
assumed.
Parameters
----------
c_or_cr : array_like or tuple of (array_like, array_like)
The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
actual shape of ``c``, it will be converted to a 1-D array. If not
supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
x : (M,) or (M, K) array_like
Matrix with which to multiply.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(result entirely NaNs) if the inputs do contain infinities or NaNs.
workers : int, optional
To pass to scipy.fft.fft and ifft. Maximum number of workers to use
for parallel computation. If negative, the value wraps around from
``os.cpu_count()``. See scipy.fft.fft for more details.
Returns
-------
T @ x : (M,) or (M, K) ndarray
The result of the matrix multiplication ``T @ x``. Shape of return
matches shape of `x`.
See Also
--------
toeplitz : Toeplitz matrix
solve_toeplitz : Solve a Toeplitz system using Levinson Recursion
Notes
-----
The Toeplitz matrix is embedded in a circulant matrix and the FFT is used
to efficiently calculate the matrix-matrix product.
Because the computation is based on the FFT, integer inputs will
result in floating point outputs. This is unlike NumPy's `matmul`,
which preserves the data type of the input.
This is partly based on the implementation that can be found in [1]_,
licensed under the MIT license. More information about the method can be
found in reference [2]_. References [3]_ and [4]_ have more reference
implementations in Python.
.. versionadded:: 1.6.0
References
----------
.. [1] Jacob R Gardner, Geoff Pleiss, David Bindel, Kilian
Q Weinberger, Andrew Gordon Wilson, "GPyTorch: Blackbox Matrix-Matrix
Gaussian Process Inference with GPU Acceleration" with contributions
from Max Balandat and Ruihan Wu. Available online:
https://github.com/cornellius-gp/gpytorch
.. [2] J. Demmel, P. Koev, and X. Li, "A Brief Survey of Direct Linear
Solvers". In Z. Bai, J. Demmel, J. Dongarra, A. Ruhe, and H. van der
Vorst, editors. Templates for the Solution of Algebraic Eigenvalue
Problems: A Practical Guide. SIAM, Philadelphia, 2000. Available at:
http://www.netlib.org/utk/people/JackDongarra/etemplates/node384.html
.. [3] R. Scheibler, E. Bezzam, I. Dokmanic, Pyroomacoustics: A Python
package for audio room simulations and array processing algorithms,
Proc. IEEE ICASSP, Calgary, CA, 2018.
https://github.com/LCAV/pyroomacoustics/blob/pypi-release/
pyroomacoustics/adaptive/util.py
.. [4] Marano S, Edwards B, Ferrari G and Fah D (2017), "Fitting
Earthquake Spectra: Colored Noise and Incomplete Data", Bulletin of
the Seismological Society of America., January, 2017. Vol. 107(1),
pp. 276-291.
Examples
--------
Multiply the Toeplitz matrix T with matrix x::
        [ 1 -1 -2 -3]       [1 10]
    T = [ 3  1 -1 -2]   x = [2 11]
        [ 6  3  1 -1]       [2 11]
        [10  6  3  1]       [5 19]
To specify the Toeplitz matrix, only the first column and the first
row are needed.
>>> c = np.array([1, 3, 6, 10]) # First column of T
>>> r = np.array([1, -1, -2, -3]) # First row of T
>>> x = np.array([[1, 10], [2, 11], [2, 11], [5, 19]])
>>> from scipy.linalg import toeplitz, matmul_toeplitz
>>> matmul_toeplitz((c, r), x)
array([[-20., -80.],
[ -7., -8.],
[ 9., 85.],
[ 33., 218.]])
Check the result by creating the full Toeplitz matrix and
multiplying it by ``x``.
>>> toeplitz(c, r) @ x
array([[-20, -80],
[ -7, -8],
[ 9, 85],
[ 33, 218]])
The full matrix is never formed explicitly, so this routine
is suitable for very large Toeplitz matrices.
>>> n = 1000000
>>> matmul_toeplitz([1] + [0]*(n-1), np.ones(n))
array([1., 1., 1., ..., 1., 1., 1.])
"""
from ..fft import fft, ifft, rfft, irfft
r, c, x, dtype, x_shape = _validate_args_for_toeplitz_ops(
c_or_cr, x, check_finite, keep_b_shape=False, enforce_square=False)
n, m = x.shape
T_nrows = len(c)
T_ncols = len(r)
p = T_nrows + T_ncols - 1 # equivalent to len(embedded_col)
embedded_col = np.concatenate((c, r[-1:0:-1]))
if np.iscomplexobj(embedded_col) or np.iscomplexobj(x):
fft_mat = fft(embedded_col, axis=0, workers=workers).reshape(-1, 1)
fft_x = fft(x, n=p, axis=0, workers=workers)
mat_times_x = ifft(fft_mat*fft_x, axis=0,
workers=workers)[:T_nrows, :]
else:
# Real inputs; using rfft is faster
fft_mat = rfft(embedded_col, axis=0, workers=workers).reshape(-1, 1)
fft_x = rfft(x, n=p, axis=0, workers=workers)
mat_times_x = irfft(fft_mat*fft_x, axis=0,
workers=workers, n=p)[:T_nrows, :]
return_shape = (T_nrows,) if len(x_shape) == 1 else (T_nrows, m)
return mat_times_x.reshape(*return_shape)
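# Hedged sketch (not part of SciPy): the circulant-embedding idea from the
# Notes of ``matmul_toeplitz`` spelled out with plain NumPy FFTs on the same
# 4x4 example used in the docstring.  The helper name is illustrative only.
def _circulant_embedding_demo():
    import numpy as np
    c = np.array([1., 3., 6., 10.])    # first column of T
    r = np.array([1., -1., -2., -3.])  # first row of T
    x = np.array([1., 2., 2., 5.])
    p = len(c) + len(r) - 1
    embedded_col = np.concatenate((c, r[-1:0:-1]))
    # multiplying by the circulant matrix is a circular convolution, i.e. an
    # elementwise product in Fourier space; keep only the first len(c) rows
    y = np.fft.ifft(np.fft.fft(embedded_col) * np.fft.fft(x, n=p)).real
    return y[:len(c)]  # array([-20.,  -7.,   9.,  33.])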
| bsd-3-clause |
bundgus/python-playground | matplotlib-playground/examples/user_interfaces/embedding_webagg.py | 3 | 8190 | """
This example demonstrates how to embed matplotlib WebAgg interactive
plotting in your own web application and framework. It is not
necessary to do all this if you merely want to display a plot in a
browser or use matplotlib's built-in Tornado-based server "on the
side".
The framework being used must support web sockets.
"""
import io
try:
import tornado
except ImportError:
raise RuntimeError("This example requires tornado.")
import tornado.web
import tornado.httpserver
import tornado.ioloop
import tornado.websocket
from matplotlib.backends.backend_webagg_core import (
FigureManagerWebAgg, new_figure_manager_given_figure)
from matplotlib.figure import Figure
import numpy as np
import json
def create_figure():
"""
Creates a simple example figure.
"""
fig = Figure()
a = fig.add_subplot(111)
t = np.arange(0.0, 3.0, 0.01)
s = np.sin(2 * np.pi * t)
a.plot(t, s)
return fig
# The following is the content of the web page. You would normally
# generate this using some sort of template facility in your web
# framework, but here we just use Python string formatting.
html_content = """
<html>
<head>
<!-- TODO: There should be a way to include all of the required javascript
and CSS so matplotlib can add to the set in the future if it
needs to. -->
<link rel="stylesheet" href="_static/css/page.css" type="text/css">
<link rel="stylesheet" href="_static/css/boilerplate.css" type="text/css" />
<link rel="stylesheet" href="_static/css/fbm.css" type="text/css" />
<link rel="stylesheet" href="_static/jquery/css/themes/base/jquery-ui.min.css" >
<script src="_static/jquery/js/jquery-1.11.3.min.js"></script>
<script src="_static/jquery/js/jquery-ui.min.js"></script>
<script src="mpl.js"></script>
<script>
/* This is a callback that is called when the user saves
(downloads) a file. Its purpose is really to map from a
figure and file format to a url in the application. */
function ondownload(figure, format) {
window.open('download.' + format, '_blank');
};
$(document).ready(
function() {
/* It is up to the application to provide a websocket that the figure
will use to communicate to the server. This websocket object can
also be a "fake" websocket that underneath multiplexes messages
from multiple figures, if necessary. */
var websocket_type = mpl.get_websocket_type();
var websocket = new websocket_type("%(ws_uri)sws");
// mpl.figure creates a new figure on the webpage.
var fig = new mpl.figure(
// A unique numeric identifier for the figure
%(fig_id)s,
// A websocket object (or something that behaves like one)
websocket,
// A function called when a file type is selected for download
ondownload,
// The HTML element in which to place the figure
$('div#figure'));
}
);
</script>
<title>matplotlib</title>
</head>
<body>
<div id="figure">
</div>
</body>
</html>
"""
class MyApplication(tornado.web.Application):
class MainPage(tornado.web.RequestHandler):
"""
Serves the main HTML page.
"""
def get(self):
manager = self.application.manager
ws_uri = "ws://{req.host}/".format(req=self.request)
content = html_content % {
"ws_uri": ws_uri, "fig_id": manager.num}
self.write(content)
class MplJs(tornado.web.RequestHandler):
"""
Serves the generated matplotlib javascript file. The content
is dynamically generated based on which toolbar functions the
user has defined. Call `FigureManagerWebAgg` to get its
content.
"""
def get(self):
self.set_header('Content-Type', 'application/javascript')
js_content = FigureManagerWebAgg.get_javascript()
self.write(js_content)
class Download(tornado.web.RequestHandler):
"""
Handles downloading of the figure in various file formats.
"""
def get(self, fmt):
manager = self.application.manager
mimetypes = {
'ps': 'application/postscript',
'eps': 'application/postscript',
'pdf': 'application/pdf',
'svg': 'image/svg+xml',
'png': 'image/png',
'jpeg': 'image/jpeg',
'tif': 'image/tiff',
'emf': 'application/emf'
}
self.set_header('Content-Type', mimetypes.get(fmt, 'binary'))
buff = io.BytesIO()
manager.canvas.print_figure(buff, format=fmt)
self.write(buff.getvalue())
class WebSocket(tornado.websocket.WebSocketHandler):
"""
A websocket for interactive communication between the plot in
the browser and the server.
In addition to the methods required by tornado, it is required to
have two callback methods:
- ``send_json(json_content)`` is called by matplotlib when
it needs to send json to the browser. `json_content` is
a JSON tree (Python dictionary), and it is the responsibility
of this implementation to encode it as a string to send over
the socket.
- ``send_binary(blob)`` is called to send binary image data
to the browser.
"""
supports_binary = True
def open(self):
# Register the websocket with the FigureManager.
manager = self.application.manager
manager.add_web_socket(self)
if hasattr(self, 'set_nodelay'):
self.set_nodelay(True)
def on_close(self):
# When the socket is closed, deregister the websocket with
# the FigureManager.
manager = self.application.manager
manager.remove_web_socket(self)
def on_message(self, message):
# The 'supports_binary' message is relevant to the
# websocket itself. The other messages get passed along
# to matplotlib as-is.
# Every message has a "type" and a "figure_id".
message = json.loads(message)
if message['type'] == 'supports_binary':
self.supports_binary = message['value']
else:
manager = self.application.manager
manager.handle_json(message)
def send_json(self, content):
self.write_message(json.dumps(content))
def send_binary(self, blob):
if self.supports_binary:
self.write_message(blob, binary=True)
else:
data_uri = "data:image/png;base64,{0}".format(
blob.encode('base64').replace('\n', ''))
self.write_message(data_uri)
def __init__(self, figure):
self.figure = figure
self.manager = new_figure_manager_given_figure(
id(figure), figure)
super(MyApplication, self).__init__([
# Static files for the CSS and JS
(r'/_static/(.*)',
tornado.web.StaticFileHandler,
{'path': FigureManagerWebAgg.get_static_file_path()}),
# The page that contains all of the pieces
('/', self.MainPage),
('/mpl.js', self.MplJs),
# Sends images and events to the browser, and receives
# events from the browser
('/ws', self.WebSocket),
# Handles the downloading (i.e., saving) of static images
(r'/download.([a-z0-9.]+)', self.Download),
])
if __name__ == "__main__":
figure = create_figure()
application = MyApplication(figure)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8080)
print("http://127.0.0.1:8080/")
print("Press Ctrl+C to quit")
tornado.ioloop.IOLoop.instance().start()
| mit |
mizzao/ggplot | ggplot/themes/theme.py | 12 | 5935 | """
Theme elements:
* element_line
* element_rect
* element_text
* element_title
These elements define what operations can be performed. The specific targets,
eg. line, rect, text, title and their derivatives axis_title or axis_title_x
specify the scope of the theme application.
"""
from copy import deepcopy
from .element_target import element_target_factory, merge_element_targets
class theme(object):
"""This is an abstract base class for themes.
In general, only complete themes should subclass this class.
Notes
-----
When subclassing there are really only two methods that need to be
implemented.
__init__: This should call super().__init__ which will define
self._rcParams. Subclasses should customize self._rcParams after calling
super().__init__. That will ensure that the rcParams are applied at
the appropriate time.
The other method is apply_theme(ax). This method takes an axes object that
has been created during the plot process. The theme should modify the
axes accordingly.
"""
def __init__(self, complete=False, **kwargs):
"""
Provide ggplot2 themeing capabilities.
Parameters
-----------
complete : bool
Themes that are complete will override any existing themes.
themes that are not complete (ie. partial) will add to or
override specific elements of the current theme.
eg. theme_matplotlib() + theme_xkcd() will be completely
determined by theme_xkcd, but
theme_matplotlib() + theme(axis_text_x=element_text(angle=45)) will
only modify the x axis text.
kwargs**: theme_element
kwargs are theme_elements based on http://docs.ggplot2.org/current/theme.html.
Currently only a subset of the elements are implemented. In addition,
Python does not allow using '.' in argument names, so we are using '_'
instead.
For example, ggplot2 axis.ticks.y will be axis_ticks_y in Python ggplot.
"""
self.element_themes = []
self.complete = complete
self._rcParams = {}
for target_name, theme_element in kwargs.items():
self.element_themes.append(element_target_factory(target_name,
theme_element))
def apply_theme(self, ax):
"""apply_theme will be called with an axes object after plot has completed.
Complete themes should implement this method if post plot themeing is
required.
"""
pass
def get_rcParams(self):
"""Get an rcParams dict for this theme.
Notes
-----
Subclasses should not need to override this method as long as
self._rcParams is constructed properly.
rcParams are used during plotting. Sometimes the same theme can be
achieved by setting rcParams before plotting or a post_plot_callback
after plotting. The choice of how to implement it is a matter of
convenience in that case.
There are certain things that can only be themed after plotting. There
may not be an rcParam to control the theme or the act of plotting
may cause an entity to come into existence before it can be themed.
"""
rcParams = deepcopy(self._rcParams)
if self.element_themes:
for element_theme in self.element_themes:
rcparams = element_theme.get_rcParams()
rcParams.update(rcparams)
return rcParams
def post_plot_callback(self, ax):
"""Apply this theme, then apply additional modifications in order.
This method should not be overridden. Subclasses should override
the apply_theme method instead. This implementation will ensure that
a theme that includes partial themes is themed properly.
"""
self.apply_theme(ax)
# does this need to be ordered first?
for element_theme in self.element_themes:
element_theme.post_plot_callback(ax)
def add_theme(self, other):
"""Add themes together.
Subclasses should not override this method.
This will be called when adding two instances of class 'theme'
together.
A complete theme will annihilate any previous themes. Partial themes
can be added together and can be added to a complete theme.
"""
if other.complete:
return other
else:
theme_copy = deepcopy(self)
theme_copy.element_themes = merge_element_targets(
deepcopy(self.element_themes),
deepcopy(other.element_themes))
return theme_copy
def __add__(self, other):
if isinstance(other, theme):
return self.add_theme(other)
else:
raise TypeError()
def __radd__(self, other):
"""Subclasses should not override this method.
This will be called in one of two ways:
gg + theme which is translated to self=theme, other=gg
or
theme1 + theme2 which is translated into self=theme2, other=theme1
"""
if not isinstance(other, theme):
gg_copy = deepcopy(other)
if self.complete:
gg_copy.theme = self
else:
gg_copy.theme = other.theme.add_theme(self)
return gg_copy
# other + self is theme + self
else:
# adding theme and theme here
# other + self
# if self is complete return self
if self.complete:
return self
# else make a copy of other combined with self.
else:
theme_copy = deepcopy(other)
theme_copy.element_themes.append(self)
return theme_copy
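# Hedged sketch (not part of ggplot): a minimal complete theme that follows
# the subclassing recipe in the class docstring above -- customize
# self._rcParams after calling the base __init__, and do any post-plot work
# in apply_theme(ax).  The class name and the rcParams chosen here are
# illustrative only.
class _theme_sketch(theme):
    def __init__(self):
        super(_theme_sketch, self).__init__(complete=True)
        self._rcParams["font.size"] = 11.0
        self._rcParams["axes.grid"] = True
    def apply_theme(self, ax):
        # ax is the matplotlib axes created during plotting
        ax.set_axisbelow(True)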
| bsd-2-clause |
heli522/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
tkaitchuck/nupic | external/linux64/lib/python2.6/site-packages/matplotlib/artist.py | 69 | 33042 | from __future__ import division
import re, warnings
import matplotlib
import matplotlib.cbook as cbook
from transforms import Bbox, IdentityTransform, TransformedBbox, TransformedPath
from path import Path
## Note, matplotlib artists use the doc strings for set and get
# methods to enable the introspection methods of setp and getp. Every
# set_* method should have a docstring containing the line
#
# ACCEPTS: [ legal | values ]
#
# and aliases for setters and getters should have a docstring that
# starts with 'alias for ', as in 'alias for set_somemethod'
#
# You may wonder why we use so much boiler-plate manually defining the
# set_alias and get_alias functions, rather than using some clever
# python trick. The answer is that I need to be able to manipulate
# the docstring, and there is no clever way to do that in python 2.2,
# as far as I can see - see
# http://groups.google.com/groups?hl=en&lr=&threadm=mailman.5090.1098044946.5135.python-list%40python.org&rnum=1&prev=/groups%3Fq%3D__doc__%2Bauthor%253Ajdhunter%2540ace.bsd.uchicago.edu%26hl%3Den%26btnG%3DGoogle%2BSearch
class Artist(object):
"""
Abstract base class for someone who renders into a
:class:`FigureCanvas`.
"""
aname = 'Artist'
zorder = 0
def __init__(self):
self.figure = None
self._transform = None
self._transformSet = False
self._visible = True
self._animated = False
self._alpha = 1.0
self.clipbox = None
self._clippath = None
self._clipon = True
self._lod = False
self._label = ''
self._picker = None
self._contains = None
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
self.axes = None
self._remove_method = None
self._url = None
self.x_isdata = True # False to avoid updating Axes.dataLim with x
self.y_isdata = True # with y
self._snap = None
def remove(self):
"""
Remove the artist from the figure if possible. The effect
will not be visible until the figure is redrawn, e.g., with
:meth:`matplotlib.axes.Axes.draw_idle`. Call
:meth:`matplotlib.axes.Axes.relim` to update the axes limits
if desired.
Note: :meth:`~matplotlib.axes.Axes.relim` will not see
collections even if the collection was added to axes with
*autolim* = True.
Note: there is no support for removing the artist's legend entry.
"""
# There is no method to set the callback. Instead the parent should set
# the _remove_method attribute directly. This would be a protected
# attribute if Python supported that sort of thing. The callback
# has one parameter, which is the child to be removed.
if self._remove_method != None:
self._remove_method(self)
else:
raise NotImplementedError('cannot remove artist')
# TODO: the fix for the collections relim problem is to move the
# limits calculation into the artist itself, including the property
# of whether or not the artist should affect the limits. Then there
# will be no distinction between axes.add_line, axes.add_patch, etc.
# TODO: add legend support
def have_units(self):
'Return *True* if units are set on the *x* or *y* axes'
ax = self.axes
if ax is None or ax.xaxis is None:
return False
return ax.xaxis.have_units() or ax.yaxis.have_units()
def convert_xunits(self, x):
"""For artists in an axes, if the xaxis has units support,
convert *x* using xaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.xaxis is None:
#print 'artist.convert_xunits no conversion: ax=%s'%ax
return x
return ax.xaxis.convert_units(x)
def convert_yunits(self, y):
"""For artists in an axes, if the yaxis has units support,
convert *y* using yaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.yaxis is None: return y
return ax.yaxis.convert_units(y)
def set_axes(self, axes):
"""
Set the :class:`~matplotlib.axes.Axes` instance in which the
artist resides, if any.
ACCEPTS: an :class:`~matplotlib.axes.Axes` instance
"""
self.axes = axes
def get_axes(self):
"""
Return the :class:`~matplotlib.axes.Axes` instance the artist
resides in, or *None*
"""
return self.axes
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
"""
try: del self._propobservers[oid]
except KeyError: pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in self._propobservers.items():
func(self)
def is_transform_set(self):
"""
Returns *True* if :class:`Artist` has a transform explicitly
set.
"""
return self._transformSet
def set_transform(self, t):
"""
Set the :class:`~matplotlib.transforms.Transform` instance
used by this artist.
ACCEPTS: :class:`~matplotlib.transforms.Transform` instance
"""
self._transform = t
self._transformSet = True
self.pchanged()
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform`
instance used by this artist.
"""
if self._transform is None:
self._transform = IdentityTransform()
return self._transform
def hitlist(self, event):
"""
List the children of the artist which contain the mouse event *event*.
"""
import traceback
L = []
try:
hascursor,info = self.contains(event)
if hascursor:
L.append(self)
except:
traceback.print_exc()
print "while checking",self.__class__
for a in self.get_children():
L.extend(a.hitlist(event))
return L
def get_children(self):
"""
Return a list of the child :class:`Artist`s this
:class:`Artist` contains.
"""
return []
def contains(self, mouseevent):
"""Test whether the artist contains the mouse event.
Returns the truth value and a dictionary of artist specific details of
selection, such as which points are contained in the pick radius. See
individual artists for details.
"""
if callable(self._contains): return self._contains(self,mouseevent)
#raise NotImplementedError,str(self.__class__)+" needs 'contains' method"
warnings.warn("'%s' needs 'contains' method" % self.__class__.__name__)
return False,{}
def set_contains(self,picker):
"""
Replace the contains test used by this artist. The new picker
should be a callable function which determines whether the
artist is hit by the mouse event::
hit, props = picker(artist, mouseevent)
If the mouse event is over the artist, return *hit* = *True*
and *props* is a dictionary of properties you want returned
with the contains test.
ACCEPTS: a callable function
"""
self._contains = picker
def get_contains(self):
"""
Return the _contains test used by the artist, or *None* for default.
"""
return self._contains
def pickable(self):
'Return *True* if :class:`Artist` is pickable.'
return (self.figure is not None and
self.figure.canvas is not None and
self._picker is not None)
def pick(self, mouseevent):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if *mouseevent* is over
the artist and the artist has picker set
"""
# Pick self
if self.pickable():
picker = self.get_picker()
if callable(picker):
inside,prop = picker(self,mouseevent)
else:
inside,prop = self.contains(mouseevent)
if inside:
self.figure.canvas.pick_event(mouseevent, self, **prop)
# Pick children
for a in self.get_children():
a.pick(mouseevent)
def set_picker(self, picker):
"""
Set the epsilon for picking used by this artist
*picker* can be one of the following:
* *None*: picking is disabled for this artist (default)
* A boolean: if *True* then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
* A float: if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
off an event if its data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, e.g. the indices of the data within
epsilon of the pick event
* A function: if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event::
hit, props = picker(artist, mouseevent)
to determine the hit test. if the mouse event is over the
artist, return *hit=True* and props is a dictionary of
properties you want added to the PickEvent attributes.
ACCEPTS: [None|float|boolean|callable]
"""
self._picker = picker
def get_picker(self):
'Return the picker object used by this artist'
return self._picker
def is_figure_set(self):
"""
Returns True if the artist is assigned to a
:class:`~matplotlib.figure.Figure`.
"""
return self.figure is not None
def get_url(self):
"""
Returns the url
"""
return self._url
def set_url(self, url):
"""
Sets the url for the artist
"""
self._url = url
def get_snap(self):
"""
Returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg backends.
"""
return self._snap
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg backends.
"""
self._snap = snap
def get_figure(self):
"""
Return the :class:`~matplotlib.figure.Figure` instance the
artist belongs to.
"""
return self.figure
def set_figure(self, fig):
"""
Set the :class:`~matplotlib.figure.Figure` instance the artist
belongs to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
self.figure = fig
self.pchanged()
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
self.clipbox = clipbox
self.pchanged()
def set_clip_path(self, path, transform=None):
"""
Set the artist's clip path, which may be:
* a :class:`~matplotlib.patches.Patch` (or subclass) instance
* a :class:`~matplotlib.path.Path` instance, in which case
an optional :class:`~matplotlib.transforms.Transform`
instance may be provided, which will be applied to the
path before using it for clipping.
* *None*, to remove the clipping path
For efficiency, if the path happens to be an axis-aligned
rectangle, this method will set the clipping box to the
corresponding rectangle and set the clipping path to *None*.
ACCEPTS: [ (:class:`~matplotlib.path.Path`,
:class:`~matplotlib.transforms.Transform`) |
:class:`~matplotlib.patches.Patch` | None ]
"""
from patches import Patch, Rectangle
success = False
if transform is None:
if isinstance(path, Rectangle):
self.clipbox = TransformedBbox(Bbox.unit(), path.get_transform())
self._clippath = None
success = True
elif isinstance(path, Patch):
self._clippath = TransformedPath(
path.get_path(),
path.get_transform())
success = True
if path is None:
self._clippath = None
success = True
elif isinstance(path, Path):
self._clippath = TransformedPath(path, transform)
success = True
if not success:
print type(path), type(transform)
raise TypeError("Invalid arguments to set_clip_path")
self.pchanged()
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on all
backends
"""
return self._alpha
def get_visible(self):
"Return the artist's visiblity"
return self._visible
def get_animated(self):
"Return the artist's animated state"
return self._animated
def get_clip_on(self):
'Return whether artist uses clipping'
return self._clipon
def get_clip_box(self):
'Return artist clipbox'
return self.clipbox
def get_clip_path(self):
'Return artist clip path'
return self._clippath
def get_transformed_clip_path_and_affine(self):
'''
Return the clip path with the non-affine part of its
transformation applied, and the remaining affine part of its
transformation.
'''
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
ACCEPTS: [True | False]
"""
self._clipon = b
self.pchanged()
def _set_gc_clip(self, gc):
'Set the clip properly for the gc'
if self._clipon:
if self.clipbox is not None:
gc.set_clip_rectangle(self.clipbox)
gc.set_clip_path(self._clippath)
else:
gc.set_clip_rectangle(None)
gc.set_clip_path(None)
def draw(self, renderer, *args, **kwargs):
'Derived classes drawing method'
if not self.get_visible(): return
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
ACCEPTS: float (0.0 transparent through 1.0 opaque)
"""
self._alpha = alpha
self.pchanged()
def set_lod(self, on):
"""
Set Level of Detail on or off. If on, the artists may examine
things like the pixel width of the axes and draw a subset of
their contents accordingly
ACCEPTS: [True | False]
"""
self._lod = on
self.pchanged()
def set_visible(self, b):
"""
Set the artist's visibility.
ACCEPTS: [True | False]
"""
self._visible = b
self.pchanged()
def set_animated(self, b):
"""
Set the artist's animation state.
ACCEPTS: [True | False]
"""
self._animated = b
self.pchanged()
def update(self, props):
"""
Update the properties of this :class:`Artist` from the
dictionary *props*.
"""
store = self.eventson
self.eventson = False
changed = False
for k,v in props.items():
func = getattr(self, 'set_'+k, None)
if func is None or not callable(func):
raise AttributeError('Unknown property %s'%k)
func(v)
changed = True
self.eventson = store
if changed: self.pchanged()
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: any string
"""
self._label = s
self.pchanged()
def get_zorder(self):
"""
Return the :class:`Artist`'s zorder.
"""
return self.zorder
def set_zorder(self, level):
"""
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
ACCEPTS: any number
"""
self.zorder = level
self.pchanged()
def update_from(self, other):
'Copy properties from *other* to *self*.'
self._transform = other._transform
self._transformSet = other._transformSet
self._visible = other._visible
self._alpha = other._alpha
self.clipbox = other.clipbox
self._clipon = other._clipon
self._clippath = other._clippath
self._lod = other._lod
self._label = other._label
self.pchanged()
def set(self, **kwargs):
"""
A tkstyle set command, pass *kwargs* to set properties
"""
ret = []
for k,v in kwargs.items():
k = k.lower()
funcName = "set_%s"%k
func = getattr(self,funcName)
ret.extend( [func(v)] )
return ret
def findobj(self, match=None):
"""
pyplot signature:
findobj(o=gcf(), match=None)
Recursively find all :class:matplotlib.artist.Artist instances
contained in self.
*match* can be
- None: return all objects contained in artist (including artist)
- function with signature ``boolean = match(artist)`` used to filter matches
- class instance: eg Line2D. Only return artists of class type
.. plot:: mpl_examples/pylab_examples/findobj_demo.py
"""
if match is None: # always return True
def matchfunc(x): return True
elif cbook.issubclass_safe(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif callable(match):
matchfunc = match
else:
raise ValueError('match must be None, a matplotlib.artist.Artist subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
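# Hedged sketch (not part of matplotlib): one way to write the callable
# picker that the ``set_picker`` docstring above describes.  A picker must
# return a (hit, props) pair; this one simply defers to the artist's own
# contains() test and tags the resulting pick event.  The function name is
# illustrative only.
def _demo_picker(artist, mouseevent):
    hit, props = artist.contains(mouseevent)
    props['via'] = 'demo picker'
    return hit, props
# Typical use (sketch): some_line.set_picker(_demo_picker)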
class ArtistInspector:
"""
A helper class to inspect an :class:`~matplotlib.artist.Artist`
and return information about its settable properties and their
current values.
"""
def __init__(self, o):
"""
Initialize the artist inspector with an
:class:`~matplotlib.artist.Artist` or sequence of
:class:`Artists`. If a sequence is used, we assume it is a
homogeneous sequence (all :class:`Artists` are of the same
type) and it is your responsibility to make sure this is so.
"""
if cbook.iterable(o) and len(o): o = o[0]
self.oorig = o
if not isinstance(o, type):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
def get_aliases(self):
"""
Get a dict mapping *fullname* -> *alias* for each *alias* in
the :class:`~matplotlib.artist.ArtistInspector`.
Eg., for lines::
{'markerfacecolor': 'mfc',
'linewidth' : 'lw',
}
"""
names = [name for name in dir(self.o) if
(name.startswith('set_') or name.startswith('get_'))
and callable(getattr(self.o,name))]
aliases = {}
for name in names:
func = getattr(self.o, name)
if not self.is_alias(func): continue
docstring = func.__doc__
fullname = docstring[10:]
aliases.setdefault(fullname[4:], {})[name[4:]] = None
return aliases
_get_valid_values_regex = re.compile(r"\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))")
def get_valid_values(self, attr):
"""
Get the legal arguments for the setter associated with *attr*.
This is done by querying the docstring of the function *set_attr*
for a line that begins with ACCEPTS:
Eg., for a line linestyle, return
[ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
"""
name = 'set_%s'%attr
if not hasattr(self.o, name):
raise AttributeError('%s has no function %s'%(self.o,name))
func = getattr(self.o, name)
docstring = func.__doc__
if docstring is None: return 'unknown'
if docstring.startswith('alias for '):
return None
match = self._get_valid_values_regex.search(docstring)
if match is not None:
return match.group(1).replace('\n', ' ')
return 'unknown'
def _get_setters_and_targets(self):
"""
Get the attribute strings and a full path to where the setter
is defined for all setters in an object.
"""
setters = []
for name in dir(self.o):
if not name.startswith('set_'): continue
o = getattr(self.o, name)
if not callable(o): continue
func = o
if self.is_alias(func): continue
source_class = self.o.__module__ + "." + self.o.__name__
for cls in self.o.mro():
if name in cls.__dict__:
source_class = cls.__module__ + "." + cls.__name__
break
setters.append((name[4:], source_class + "." + name))
return setters
def get_setters(self):
"""
Get the attribute strings with setters for object. Eg., for a line,
return ``['markerfacecolor', 'linewidth', ....]``.
"""
return [prop for prop, target in self._get_setters_and_targets()]
def is_alias(self, o):
"""
Return *True* if method object *o* is an alias for another
function.
"""
ds = o.__doc__
if ds is None: return False
return ds.startswith('alias for ')
def aliased_name(self, s):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME.
E.g. for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
return s + ''.join([' or %s' % x for x in self.aliasd[s].keys()])
else:
return s
def aliased_name_rest(self, s, target):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME formatted for ReST
E.g. for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
aliases = ''.join([' or %s' % x for x in self.aliasd[s].keys()])
else:
aliases = ''
return ':meth:`%s <%s>`%s' % (s, target, aliases)
def pprint_setters(self, prop=None, leadingspace=2):
"""
If *prop* is *None*, return a list of strings of all settable properties
and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' %(pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name(prop)
lines.append('%s%s: %s' %(pad, name, accepts))
return lines
def pprint_setters_rest(self, prop=None, leadingspace=2):
"""
If *prop* is *None*, return a list of strings of all settable properties
and their valid values. Format the output for ReST
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' %(pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
########
names = [self.aliased_name_rest(prop, target) for prop, target in attrs]
accepts = [self.get_valid_values(prop) for prop, target in attrs]
col0_len = max([len(n) for n in names])
col1_len = max([len(a) for a in accepts])
table_formatstr = pad + '='*col0_len + ' ' + '='*col1_len
lines.append('')
lines.append(table_formatstr)
lines.append(pad + 'Property'.ljust(col0_len+3) + \
'Description'.ljust(col1_len))
lines.append(table_formatstr)
lines.extend([pad + n.ljust(col0_len+3) + a.ljust(col1_len)
for n, a in zip(names, accepts)])
lines.append(table_formatstr)
lines.append('')
return lines
########
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name_rest(prop, path)
lines.append('%s%s: %s' %(pad, name, accepts))
return lines
def pprint_getters(self):
"""
Return the getters and actual values as list of strings.
"""
o = self.oorig
getters = [name for name in dir(o)
if name.startswith('get_')
and callable(getattr(o, name))]
#print getters
getters.sort()
lines = []
for name in getters:
func = getattr(o, name)
if self.is_alias(func): continue
try: val = func()
except: continue
if getattr(val, 'shape', ()) != () and len(val)>6:
s = str(val[:6]) + '...'
else:
s = str(val)
s = s.replace('\n', ' ')
if len(s)>50:
s = s[:50] + '...'
name = self.aliased_name(name[4:])
lines.append(' %s = %s' %(name, s))
return lines
def findobj(self, match=None):
"""
Recursively find all :class:`matplotlib.artist.Artist`
instances contained in *self*.
If *match* is not None, it can be
- function with signature ``boolean = match(artist)``
- class instance: eg :class:`~matplotlib.lines.Line2D`
used to filter matches.
"""
if match is None: # always return True
def matchfunc(x): return True
elif issubclass(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif callable(match):
matchfunc = match
else:
raise ValueError('match must be None, a matplotlib.artist.Artist subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
def getp(o, property=None):
"""
Return the value of handle property. property is an optional string
for the property you want to return
Example usage::
getp(o) # get all the object properties
getp(o, 'linestyle') # get the linestyle property
*o* is a :class:`Artist` instance, eg
:class:`~matplotlib.lines.Line2D` or an instance of a
:class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.
If the *property* is 'somename', this function returns
o.get_somename()
:func:`getp` can be used to query all the gettable properties with
``getp(o)``. Many properties have aliases for shorter typing, e.g.
'lw' is an alias for 'linewidth'. In the output, aliases and full
property names will be listed as:
property or alias = value
e.g.:
linewidth or lw = 2
"""
insp = ArtistInspector(o)
if property is None:
ret = insp.pprint_getters()
print '\n'.join(ret)
return
func = getattr(o, 'get_' + property)
return func()
# alias
get = getp
def setp(h, *args, **kwargs):
"""
matplotlib supports the use of :func:`setp` ("set property") and
:func:`getp` to set and get object properties, as well as to do
introspection on the object. For example, to set the linestyle of a
line to be dashed, you can do::
>>> line, = plot([1,2,3])
>>> setp(line, linestyle='--')
If you want to know the valid types of arguments, you can provide the
name of the property you want to set without a value::
>>> setp(line, 'linestyle')
linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
If you want to see all the properties that can be set, and their
possible values, you can do::
>>> setp(line)
... long output listing omitted
:func:`setp` operates on a single instance or a list of instances.
If you are in query mode introspecting the possible values, only
the first instance in the sequence is used. When actually setting
values, all the instances will be set. E.g., suppose you have a
list of two lines, the following will make both lines thicker and
red::
>>> x = arange(0,1.0,0.01)
>>> y1 = sin(2*pi*x)
>>> y2 = sin(4*pi*x)
>>> lines = plot(x, y1, x, y2)
>>> setp(lines, linewidth=2, color='r')
:func:`setp` works with the matlab(TM) style string/value pairs or
with python kwargs. For example, the following are equivalent::
>>> setp(lines, 'linewidth', 2, 'color', 'r') # matlab style
>>> setp(lines, linewidth=2, color='r') # python style
"""
insp = ArtistInspector(h)
if len(kwargs)==0 and len(args)==0:
print '\n'.join(insp.pprint_setters())
return
if len(kwargs)==0 and len(args)==1:
print insp.pprint_setters(prop=args[0])
return
if not cbook.iterable(h): h = [h]
else: h = cbook.flatten(h)
if len(args)%2:
raise ValueError('The set args must be string, value pairs')
funcvals = []
for i in range(0, len(args)-1, 2):
funcvals.append((args[i], args[i+1]))
funcvals.extend(kwargs.items())
ret = []
for o in h:
for s, val in funcvals:
s = s.lower()
funcName = "set_%s"%s
func = getattr(o,funcName)
ret.extend( [func(val)] )
return [x for x in cbook.flatten(ret)]
def kwdoc(a):
hardcopy = matplotlib.rcParams['docstring.hardcopy']
if hardcopy:
return '\n'.join(ArtistInspector(a).pprint_setters_rest(leadingspace=2))
else:
return '\n'.join(ArtistInspector(a).pprint_setters(leadingspace=2))
kwdocd = dict()
kwdocd['Artist'] = kwdoc(Artist)
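# Hedged sketch (not part of matplotlib): ArtistInspector is the machinery
# behind setp(), getp() and kwdoc(); it can also be used directly to list the
# settable properties of any Artist subclass.  The helper name is
# illustrative only.
def _demo_list_settables(artist_or_class=Artist):
    insp = ArtistInspector(artist_or_class)
    return insp.get_setters()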
| gpl-3.0 |
nvoron23/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
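# Hedged sketch (not part of scikit-learn): a quick empirical check that the
# random features above approximate the RBF kernel, i.e. that Z Z^T is close
# to exp(-gamma * ||x - y||^2).  Uses the pairwise_kernels import at the top
# of this module; the helper name is illustrative only.
def _rbf_sampler_demo(gamma=0.5, n_components=2000):
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    Z = RBFSampler(gamma=gamma, n_components=n_components,
                   random_state=0).fit_transform(X)
    exact = pairwise_kernels(X, metric='rbf', gamma=gamma)
    # the maximum entrywise error shrinks roughly like 1 / sqrt(n_components)
    return np.abs(Z.dot(Z.T) - exact).max()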
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
Uses sampling of the Fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
shape = (n_samples, n_features * (2*sample_steps - 1))
Whether the return value is an array or a sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
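# Hedged sketch (not part of scikit-learn): each original feature is mapped to
# 2*sample_steps-1 output features (the zeroth component plus one cos/sin pair
# per remaining sampling point), so the transformed width is easy to predict.
# The helper name is illustrative only.
def _additive_chi2_shape_demo():
    X = np.random.RandomState(0).rand(6, 4)  # chi2 maps need non-negative data
    X_new = AdditiveChi2Sampler(sample_steps=2).fit_transform(X)
    return X_new.shape  # (6, 4 * (2*2 - 1)) == (6, 12)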
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
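# --------------------------------------------------------------------------
# Illustrative usage sketch (added for this write-up; not part of the original
# module): approximating an RBF kernel map with the Nystroem transformer
# defined above and feeding the result to a linear classifier.  The choice of
# SGDClassifier and all parameter values are arbitrary example assumptions.
def _nystroem_usage_sketch():
    import numpy as np
    from sklearn.kernel_approximation import Nystroem
    from sklearn.linear_model import SGDClassifier

    rng = np.random.RandomState(0)
    X = rng.randn(200, 10)
    y = (X[:, 0] > 0).astype(int)

    feature_map = Nystroem(kernel="rbf", gamma=0.2, n_components=50,
                           random_state=0)
    X_features = feature_map.fit_transform(X)   # shape (200, 50)
    clf = SGDClassifier(random_state=0).fit(X_features, y)
    return clf.score(X_features, y)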
| bsd-3-clause |
Crobisaur/HyperSpec | Python/loadData2Hdf.py | 1 | 5400 | __author__ = "Christo Robison"
import numpy as np
from scipy import signal
from scipy import misc
import h5py
from PIL import Image
import os
import collections
import matplotlib.pyplot as plt
import convertBsqMulti as bsq
import png
'''This program reads in BSQ datacubes into an HDF file'''
def loadBSQ(path = '/home/crob/HyperSpec_Data/WBC v ALL/WBC25', debug=False):
d31 = []
d31_norm = []
d25 = []
d25_norm = []
l25 = []
l = []
l3 = []
lam = []
for root, dirs, files in os.walk(path):
print(dirs)
for name in sorted(files): #os walk iterates arbitrarily, sort fixes it
print(name)
if name.endswith(".png"):
# Import label image
im = np.array(Image.open(os.path.join(root,name)),'f')
                print(np.shape(im))
im = im[:,:,0:3] # > 250
# generate a mask for 3x3 conv layer (probably not needed)
#conv3bw = signal.convolve2d(bw, np.ones([22,22],dtype=np.int), mode='valid') >= 464
print(np.shape(im))
#p = open(name+'_22sqMask.png','wb')
#w = png.Writer(255)
#bw = np.flipud(bw)
im = np.flipud(im)
#l3.append(np.reshape(conv3bw, ))
#l.append(np.reshape(bw, 138659))
l.append(im)
print(np.shape(im))
print("Name = " + name)
if name.endswith(".bsq"):
bs = bsq.readbsq(os.path.join(root,name))
print(np.shape(bs[0]))
print(len(bs[1]))
#separate bsq files by prism
if len(bs[1]) == 31:
print('BSQ is size 31')
print(len(bs[1]))
lam = bs[1]
#d31.append(np.reshape(np.transpose(bs[0], (1, 2, 0)), 4298429))
d31.append(bs[0].astype(np.float32))
d31_norm.append(bs[0].astype(np.float32)/np.amax(bs[0]))
if len(bs[1]) == 25:
print('BSQ is size 25')
print(len(bs[1]))
lam = bs[1]
d25.append(bs[0].astype(np.float32))
d25_norm.append(bs[0].astype(np.float32)/np.amax(bs[0]))
#d25.append(np.reshape(bs[0],[138659,25]).astype(np.float32))
# old don't use #d25.append(np.reshape(np.transpose(bs[0], (1, 2, 0)), 3466475))
out = collections.namedtuple('examples', ['data31', 'data31_norm', 'data25', 'data25_norm', 'labels', 'lambdas'])
o = out(data31=np.dstack(d31),data31_norm=np.dstack(d31_norm), data25=d25, data25_norm=d25_norm, labels=np.dstack(l), lambdas=lam) #np.vstack(d25), labels=np.hstack(l)
return o
def convLabels(labelImg, numBands):
'''
takes a MxNx3 numpy array and creates binary labels based on predefined classes
background = 0
red = 1 WBC
green = 2 RBC
pink = 3 nuclear material
yellow = 4 ignore
'''
#b = np.uint8(numBands / 31)
# print(b / 31)
tempRed = labelImg[:,:,0] == 255
tempGreen = labelImg[:,:,1] == 255
tempBlue = labelImg[:,:,2] == 255
tempYellow = np.logical_and(tempRed, tempGreen)
tempPink = np.logical_and(tempRed, tempBlue)
temp = np.zeros(np.shape(tempRed))
temp[tempRed] = 1
temp[tempGreen] = 2
temp[tempPink] = 3
temp[tempYellow] = 4
print(temp)
print(tempRed, tempGreen, tempBlue, tempYellow, tempPink)
return temp
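# Minimal worked example (added for illustration; not in the original script):
# convLabels maps an RGB label image to the integer classes documented above,
# e.g. a pure red pixel -> 1 (WBC) and a red+green (yellow) pixel -> 4 (ignore).
def _conv_labels_example():
    tiny = np.zeros((1, 2, 3), dtype=np.uint8)
    tiny[0, 0] = [255, 0, 0]        # red    -> class 1
    tiny[0, 1] = [255, 255, 0]      # yellow -> class 4
    return convLabels(tiny, None)   # expected: [[1., 4.]]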
def convert_labels(labels,n_classes, debug = False):
for j in range(n_classes):
temp = labels == j
temp = temp.astype(int)
if j > 0:
conv_labels = np.append(conv_labels, temp)
print(temp[:])
else:
conv_labels = temp
print(np.shape(conv_labels))
conv_labels = np.reshape(conv_labels, [len(labels), n_classes], order='F')
if debug: print(np.shape(conv_labels))
if debug:
f = h5py.File("/home/crob/HyperSpec/Python/BSQ_whole.h5", "w")
f.create_dataset('bin_labels', data=conv_labels)
f.close()
return conv_labels
def getClassMean(data, classNum):
kee = np.equal(data['label'],classNum)
out = np.mean(data['data']*kee,axis=0)
return out
def getAverages(data, numClasses):
out = []
for i in range(numClasses):
a = getClassMean(data, i)
out.append(a)
return out
if __name__ == '__main__':
#A = loadBSQ()
path = '/home/crob/-_PreSortedData_Train_-' #oldpath=/HyperSpec_Data/WBC v ALL/WBC25
s = loadBSQ(path)
print(np.shape(s.data25))
f = h5py.File("HYPER_SPEC_TRAIN_RED.h5", "w")
f.create_dataset('data', data=s.data31, chunks=(443, 313, 1))
f.create_dataset('norm_data', data=s.data31_norm, chunks=(443,313,1))
f.create_dataset('labels', data=s.labels)
f.create_dataset('bands', data=s.lambdas)
g = np.shape(s.data31)
b = np.uint16(g[2] / 31) #issue with overflow if more than 256 samples. derp.
lab = np.reshape(s.labels, [443, 313, 3, b], 'f')
numExamples = np.shape(lab)
a = []
for j in range(np.uint16(numExamples[3])):
a.append(convLabels(lab[:, :, :, j], None))
f.create_dataset('classLabels', data=np.dstack(a))
#p = convert_labels(s.labels,2)
#f.create_dataset('bin_labels', data=p)
f.close() | gpl-3.0 |
mattthias/one | src/sunstone/public/bower_components/no-vnc/utils/json2graph.py | 46 | 6674 | #!/usr/bin/env python
'''
Use matplotlib to generate performance charts
Copyright 2011 Joel Martin
Licensed under MPL-2.0 (see docs/LICENSE.MPL-2.0)
'''
# a bar plot with errorbars
import sys, json, pprint
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
def usage():
print "%s json_file level1 level2 level3 [legend_height]\n\n" % sys.argv[0]
print "Description:\n"
print "level1, level2, and level3 are one each of the following:\n";
print " select=ITEM - select only ITEM at this level";
print " bar - each item on this level becomes a graph bar";
print " group - items on this level become groups of bars";
print "\n";
print "json_file is a file containing json data in the following format:\n"
print ' {';
print ' "conf": {';
print ' "order_l1": [';
print ' "level1_label1",';
print ' "level1_label2",';
print ' ...';
print ' ],';
print ' "order_l2": [';
print ' "level2_label1",';
print ' "level2_label2",';
print ' ...';
print ' ],';
print ' "order_l3": [';
print ' "level3_label1",';
print ' "level3_label2",';
print ' ...';
print ' ]';
print ' },';
print ' "stats": {';
print ' "level1_label1": {';
print ' "level2_label1": {';
print ' "level3_label1": [val1, val2, val3],';
print ' "level3_label2": [val1, val2, val3],';
print ' ...';
print ' },';
print ' "level2_label2": {';
print ' ...';
print ' },';
print ' },';
print ' "level1_label2": {';
print ' ...';
print ' },';
print ' ...';
print ' },';
print ' }';
sys.exit(2)
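# Minimal example (added for illustration; labels and timings are made up) of
# the JSON layout documented in usage() above.  json.dump()-ing this dict to a
# file yields a valid input for this script.
def example_input():
    return {
        "conf": {
            "order_l1": ["firefox", "chrome"],
            "order_l2": ["create", "read"],
            "order_l3": ["Array", "Uint8Array"],
        },
        "stats": {
            "firefox": {
                "create": {"Array": [12, 14, 13], "Uint8Array": [9, 10, 9]},
                "read": {"Array": [30, 31, 29], "Uint8Array": [22, 21, 23]},
            },
            "chrome": {
                "create": {"Array": [10, 11, 10], "Uint8Array": [8, 8, 9]},
                "read": {"Array": [25, 26, 25], "Uint8Array": [20, 19, 21]},
            },
        },
    }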
def error(msg):
print msg
sys.exit(1)
#colors = ['#ff0000', '#0863e9', '#00f200', '#ffa100',
# '#800000', '#805100', '#013075', '#007900']
colors = ['#ff0000', '#00ff00', '#0000ff',
'#dddd00', '#dd00dd', '#00dddd',
'#dd6622', '#dd2266', '#66dd22',
'#8844dd', '#44dd88', '#4488dd']
if len(sys.argv) < 5:
usage()
filename = sys.argv[1]
L1 = sys.argv[2]
L2 = sys.argv[3]
L3 = sys.argv[4]
if len(sys.argv) > 5:
legendHeight = float(sys.argv[5])
else:
legendHeight = 0.75
# Load the JSON data from the file
data = json.loads(file(filename).read())
conf = data['conf']
stats = data['stats']
# Sanity check data hierarchy
if len(conf['order_l1']) != len(stats.keys()):
error("conf.order_l1 does not match stats level 1")
for l1 in stats.keys():
if len(conf['order_l2']) != len(stats[l1].keys()):
error("conf.order_l2 does not match stats level 2 for %s" % l1)
if conf['order_l1'].count(l1) < 1:
error("%s not found in conf.order_l1" % l1)
for l2 in stats[l1].keys():
if len(conf['order_l3']) != len(stats[l1][l2].keys()):
error("conf.order_l3 does not match stats level 3")
if conf['order_l2'].count(l2) < 1:
error("%s not found in conf.order_l2" % l2)
for l3 in stats[l1][l2].keys():
if conf['order_l3'].count(l3) < 1:
error("%s not found in conf.order_l3" % l3)
#
# Generate the data based on the level specifications
#
bar_labels = None
group_labels = None
bar_vals = []
bar_sdvs = []
if L3.startswith("select="):
select_label = l3 = L3.split("=")[1]
bar_labels = conf['order_l1']
group_labels = conf['order_l2']
bar_vals = [[0]*len(group_labels) for i in bar_labels]
bar_sdvs = [[0]*len(group_labels) for i in bar_labels]
for b in range(len(bar_labels)):
l1 = bar_labels[b]
for g in range(len(group_labels)):
l2 = group_labels[g]
bar_vals[b][g] = np.mean(stats[l1][l2][l3])
bar_sdvs[b][g] = np.std(stats[l1][l2][l3])
elif L2.startswith("select="):
select_label = l2 = L2.split("=")[1]
bar_labels = conf['order_l1']
group_labels = conf['order_l3']
bar_vals = [[0]*len(group_labels) for i in bar_labels]
bar_sdvs = [[0]*len(group_labels) for i in bar_labels]
for b in range(len(bar_labels)):
l1 = bar_labels[b]
for g in range(len(group_labels)):
l3 = group_labels[g]
bar_vals[b][g] = np.mean(stats[l1][l2][l3])
bar_sdvs[b][g] = np.std(stats[l1][l2][l3])
elif L1.startswith("select="):
select_label = l1 = L1.split("=")[1]
bar_labels = conf['order_l2']
group_labels = conf['order_l3']
bar_vals = [[0]*len(group_labels) for i in bar_labels]
bar_sdvs = [[0]*len(group_labels) for i in bar_labels]
for b in range(len(bar_labels)):
l2 = bar_labels[b]
for g in range(len(group_labels)):
l3 = group_labels[g]
bar_vals[b][g] = np.mean(stats[l1][l2][l3])
bar_sdvs[b][g] = np.std(stats[l1][l2][l3])
else:
usage()
# If group is before bar then flip (zip) the data
if [L1, L2, L3].index("group") < [L1, L2, L3].index("bar"):
bar_labels, group_labels = group_labels, bar_labels
bar_vals = zip(*bar_vals)
bar_sdvs = zip(*bar_sdvs)
print "bar_vals:", bar_vals
#
# Now render the bar graph
#
ind = np.arange(len(group_labels)) # the x locations for the groups
width = 0.8 * (1.0/len(bar_labels)) # the width of the bars
fig = plt.figure(figsize=(10,6), dpi=80)
plot = fig.add_subplot(1, 1, 1)
rects = []
for i in range(len(bar_vals)):
rects.append(plot.bar(ind+width*i, bar_vals[i], width, color=colors[i],
yerr=bar_sdvs[i], align='center'))
# add axis labels, a title and tick labels
plot.set_ylabel('Milliseconds (less is better)')
plot.set_title("Javascript array test: %s" % select_label)
plot.set_xticks(ind+width)
plot.set_xticklabels( group_labels )
fontP = FontProperties()
fontP.set_size('small')
plot.legend( [r[0] for r in rects], bar_labels, prop=fontP,
loc = 'center right', bbox_to_anchor = (1.0, legendHeight))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
if np.isnan(height):
height = 0.0
plot.text(rect.get_x()+rect.get_width()/2., height+20, '%d'%int(height),
ha='center', va='bottom', size='7')
for rect in rects:
autolabel(rect)
# Adjust axis sizes
axis = list(plot.axis())
axis[0] = -width # Make sure left side has enough for bar
#axis[1] = axis[1] * 1.20 # Add 20% to the right to make sure it fits
axis[2] = 0 # Make y-axis start at 0
axis[3] = axis[3] * 1.10 # Add 10% to the top
plot.axis(axis)
plt.show()
| apache-2.0 |
v4hn/moveit | moveit_ros/benchmarks/scripts/moveit_benchmark_statistics.py | 2 | 25524 | #!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: Mark Moll, Ioan Sucan, Luis G. Torres
from sys import argv, exit
from os.path import basename, splitext
import sqlite3
import datetime
import matplotlib
matplotlib.use('pdf')
from matplotlib import __version__ as matplotlibversion
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
from math import floor
from optparse import OptionParser, OptionGroup
# Given a text line, split it into tokens (by space) and return the token
# at the desired index. Additionally, test that some expected tokens exist.
# Return None if they do not.
def readLogValue(filevar, desired_token_index, expected_tokens) :
start_pos = filevar.tell()
tokens = filevar.readline().split()
for token_index in expected_tokens:
if not tokens[token_index] == expected_tokens[token_index]:
# undo the read, if we failed to parse.
filevar.seek(start_pos)
return None
return tokens[desired_token_index]
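# Small worked example (added for illustration) of the token helper above,
# using in-memory file objects instead of a real benchmark log.
def _read_log_value_example():
    from io import BytesIO
    ok = readLogValue(BytesIO(b"Running on host123\n"), -1, {0: "Running"})
    # ok == "host123"
    bad = readLogValue(BytesIO(b"Something else entirely\n"), -1, {0: "Running"})
    # prefix mismatch: the file position is rewound and None is returned
    return ok, bad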
def readOptionalLogValue(filevar, desired_token_index, expected_tokens = {}) :
return readLogValue(filevar, desired_token_index, expected_tokens)
def readRequiredLogValue(name, filevar, desired_token_index, expected_tokens = {}) :
result = readLogValue(filevar, desired_token_index, expected_tokens)
if result == None:
raise Exception("Unable to read " + name)
return result
def ensurePrefix(line, prefix):
if not line.startswith(prefix):
raise Exception("Expected prefix " + prefix + " was not found")
return line
def readOptionalMultilineValue(filevar):
start_pos = filevar.tell()
line = filevar.readline()
if not line.startswith("<<<|"):
filevar.seek(start_pos)
return None
value = ''
line = filevar.readline()
while not line.startswith('|>>>'):
value = value + line
line = filevar.readline()
if line == None:
raise Exception("Expected token |>>> missing")
return value
def readRequiredMultilineValue(filevar):
ensurePrefix(filevar.readline(), "<<<|")
value = ''
line = filevar.readline()
while not line.startswith('|>>>'):
value = value + line
line = filevar.readline()
if line == None:
raise Exception("Expected token |>>> missing")
return value
def readBenchmarkLog(dbname, filenames):
"""Parse benchmark log files and store the parsed data in a sqlite3 database."""
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
# create all tables if they don't already exist
c.executescript("""CREATE TABLE IF NOT EXISTS experiments
(id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR(512),
totaltime REAL, timelimit REAL, memorylimit REAL, runcount INTEGER,
version VARCHAR(128), hostname VARCHAR(1024), cpuinfo TEXT,
date DATETIME, seed INTEGER, setup TEXT);
CREATE TABLE IF NOT EXISTS plannerConfigs
(id INTEGER PRIMARY KEY AUTOINCREMENT,
name VARCHAR(512) NOT NULL, settings TEXT);
CREATE TABLE IF NOT EXISTS enums
(name VARCHAR(512), value INTEGER, description TEXT,
PRIMARY KEY (name, value));
CREATE TABLE IF NOT EXISTS runs
(id INTEGER PRIMARY KEY AUTOINCREMENT, experimentid INTEGER, plannerid INTEGER,
FOREIGN KEY (experimentid) REFERENCES experiments(id) ON DELETE CASCADE,
FOREIGN KEY (plannerid) REFERENCES plannerConfigs(id) ON DELETE CASCADE);
CREATE TABLE IF NOT EXISTS progress
(runid INTEGER, time REAL, PRIMARY KEY (runid, time),
FOREIGN KEY (runid) REFERENCES runs(id) ON DELETE CASCADE)""")
for filename in filenames:
print('Processing ' + filename)
logfile = open(filename,'r')
start_pos = logfile.tell()
libname = readOptionalLogValue(logfile, 0, {1 : "version"})
if libname == None:
libname = "OMPL"
logfile.seek(start_pos)
version = readOptionalLogValue(logfile, -1, {1 : "version"})
if version == None:
# set the version number to make Planner Arena happy
version = "0.0.0"
version = ' '.join([libname, version])
expname = readRequiredLogValue("experiment name", logfile, -1, {0 : "Experiment"})
hostname = readRequiredLogValue("hostname", logfile, -1, {0 : "Running"})
date = ' '.join(ensurePrefix(logfile.readline(), "Starting").split()[2:])
expsetup = readRequiredMultilineValue(logfile)
cpuinfo = readOptionalMultilineValue(logfile)
rseed = int(readRequiredLogValue("random seed", logfile, 0, {-2 : "random", -1 : "seed"}))
timelimit = float(readRequiredLogValue("time limit", logfile, 0, {-3 : "seconds", -2 : "per", -1 : "run"}))
memorylimit = float(readRequiredLogValue("memory limit", logfile, 0, {-3 : "MB", -2 : "per", -1 : "run"}))
nrrunsOrNone = readOptionalLogValue(logfile, 0, {-3 : "runs", -2 : "per", -1 : "planner"})
nrruns = -1
if nrrunsOrNone != None:
nrruns = int(nrrunsOrNone)
totaltime = float(readRequiredLogValue("total time", logfile, 0, {-3 : "collect", -2 : "the", -1 : "data"}))
numEnums = 0
numEnumsOrNone = readOptionalLogValue(logfile, 0, {-2 : "enum"})
if numEnumsOrNone != None:
numEnums = int(numEnumsOrNone)
for i in range(numEnums):
enum = logfile.readline()[:-1].split('|')
c.execute('SELECT * FROM enums WHERE name IS "%s"' % enum[0])
if c.fetchone() == None:
for j in range(len(enum)-1):
c.execute('INSERT INTO enums VALUES (?,?,?)',
(enum[0],j,enum[j+1]))
c.execute('INSERT INTO experiments VALUES (?,?,?,?,?,?,?,?,?,?,?,?)',
(None, expname, totaltime, timelimit, memorylimit, nrruns,
version, hostname, cpuinfo, date, rseed, expsetup) )
experimentId = c.lastrowid
numPlanners = int(readRequiredLogValue("planner count", logfile, 0, {-1 : "planners"}))
for i in range(numPlanners):
plannerName = logfile.readline()[:-1]
print('Parsing data for ' + plannerName)
# read common data for planner
numCommon = int(logfile.readline().split()[0])
settings = ''
for j in range(numCommon):
settings = settings + logfile.readline() + ';'
# find planner id
c.execute('SELECT id FROM plannerConfigs WHERE (name=? AND settings=?)',
(plannerName, settings,))
p = c.fetchone()
if p==None:
c.execute('INSERT INTO plannerConfigs VALUES (?,?,?)',
(None, plannerName, settings,))
plannerId = c.lastrowid
else:
plannerId = p[0]
# get current column names
c.execute('PRAGMA table_info(runs)')
columnNames = [col[1] for col in c.fetchall()]
# read properties and add columns as necessary
numProperties = int(logfile.readline().split()[0])
propertyNames = ['experimentid', 'plannerid']
for j in range(numProperties):
field = logfile.readline().split()
propertyType = field[-1]
propertyName = '_'.join(field[:-1])
if propertyName not in columnNames:
c.execute('ALTER TABLE runs ADD %s %s' % (propertyName, propertyType))
propertyNames.append(propertyName)
# read measurements
insertFmtStr = 'INSERT INTO runs (' + ','.join(propertyNames) + \
') VALUES (' + ','.join('?'*len(propertyNames)) + ')'
numRuns = int(logfile.readline().split()[0])
runIds = []
for j in range(numRuns):
values = tuple([experimentId, plannerId] + \
[None if len(x) == 0 or x == 'nan' or x == 'inf' else x
for x in logfile.readline().split('; ')[:-1]])
c.execute(insertFmtStr, values)
# extract primary key of each run row so we can reference them
# in the planner progress data table if needed
runIds.append(c.lastrowid)
nextLine = logfile.readline().strip()
# read planner progress data if it's supplied
if nextLine != '.':
# get current column names
c.execute('PRAGMA table_info(progress)')
columnNames = [col[1] for col in c.fetchall()]
                # read progress properties and add columns as necessary
numProgressProperties = int(nextLine.split()[0])
progressPropertyNames = ['runid']
for i in range(numProgressProperties):
field = logfile.readline().split()
progressPropertyType = field[-1]
progressPropertyName = "_".join(field[:-1])
if progressPropertyName not in columnNames:
c.execute('ALTER TABLE progress ADD %s %s' %
(progressPropertyName, progressPropertyType))
progressPropertyNames.append(progressPropertyName)
# read progress measurements
insertFmtStr = 'INSERT INTO progress (' + \
','.join(progressPropertyNames) + ') VALUES (' + \
','.join('?'*len(progressPropertyNames)) + ')'
numRuns = int(logfile.readline().split()[0])
for j in range(numRuns):
dataSeries = logfile.readline().split(';')[:-1]
for dataSample in dataSeries:
values = tuple([runIds[j]] + \
[None if len(x) == 0 or x == 'nan' or x == 'inf' else x
for x in dataSample.split(',')[:-1]])
try:
c.execute(insertFmtStr, values)
except sqlite3.IntegrityError:
print('Ignoring duplicate progress data. Consider increasing ompl::tools::Benchmark::Request::timeBetweenUpdates.')
pass
logfile.readline()
logfile.close()
conn.commit()
c.close()
def plotAttribute(cur, planners, attribute, typename):
"""Create a plot for a particular attribute. It will include data for
all planners that have data for this attribute."""
labels = []
measurements = []
nanCounts = []
if typename == 'ENUM':
cur.execute('SELECT description FROM enums where name IS "%s"' % attribute)
descriptions = [ t[0] for t in cur.fetchall() ]
numValues = len(descriptions)
for planner in planners:
cur.execute('SELECT %s FROM runs WHERE plannerid = %s AND %s IS NOT NULL' \
% (attribute, planner[0], attribute))
measurement = [ t[0] for t in cur.fetchall() if t[0] != None ]
if len(measurement) > 0:
cur.execute('SELECT count(*) FROM runs WHERE plannerid = %s AND %s IS NULL' \
% (planner[0], attribute))
nanCounts.append(cur.fetchone()[0])
labels.append(planner[1])
if typename == 'ENUM':
scale = 100. / len(measurement)
measurements.append([measurement.count(i)*scale for i in range(numValues)])
else:
measurements.append(measurement)
if len(measurements)==0:
print('Skipping "%s": no available measurements' % attribute)
return
plt.clf()
ax = plt.gca()
if typename == 'ENUM':
width = .5
measurements = np.transpose(np.vstack(measurements))
colsum = np.sum(measurements, axis=1)
rows = np.where(colsum != 0)[0]
heights = np.zeros((1,measurements.shape[1]))
ind = range(measurements.shape[1])
legend_labels = []
for i in rows:
plt.bar(ind, measurements[i], width, bottom=heights[0],
color=matplotlib.cm.hot(int(floor(i*256/numValues))),
label=descriptions[i])
heights = heights + measurements[i]
xtickNames = plt.xticks([x+width/2. for x in ind], labels, rotation=30)
ax.set_ylabel(attribute.replace('_',' ') + ' (%)')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
props = matplotlib.font_manager.FontProperties()
props.set_size('small')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop = props)
elif typename == 'BOOLEAN':
width = .5
measurementsPercentage = [sum(m) * 100. / len(m) for m in measurements]
ind = range(len(measurements))
plt.bar(ind, measurementsPercentage, width)
xtickNames = plt.xticks([x + width / 2. for x in ind], labels, rotation=30, fontsize=8)
ax.set_ylabel(attribute.replace('_',' ') + ' (%)')
plt.subplots_adjust(bottom=0.3) # Squish the plot into the upper 2/3 of the page. Leave room for labels
else:
if int(matplotlibversion.split('.')[0])<1:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5)
else:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5, bootstrap=1000)
ax.set_ylabel(attribute.replace('_',' '))
#xtickNames = plt.xticks(labels, rotation=30, fontsize=10)
#plt.subplots_adjust(bottom=0.3) # Squish the plot into the upper 2/3 of the page. Leave room for labels
xtickNames = plt.setp(ax,xticklabels=labels)
plt.setp(xtickNames, rotation=30)
for tick in ax.xaxis.get_major_ticks(): # shrink the font size of the x tick labels
tick.label.set_fontsize(8)
plt.subplots_adjust(bottom=0.3) # Squish the plot into the upper 2/3 of the page. Leave room for labels
ax.set_xlabel('Motion planning algorithm')
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
if max(nanCounts)>0:
maxy = max([max(y) for y in measurements])
for i in range(len(labels)):
x = i+width/2 if typename=='BOOLEAN' else i+1
ax.text(x, .95*maxy, str(nanCounts[i]), horizontalalignment='center', size='small')
plt.show()
def plotProgressAttribute(cur, planners, attribute):
"""Plot data for a single planner progress attribute. Will create an
average time-plot with error bars of the attribute over all runs for
each planner."""
import numpy.ma as ma
plt.clf()
ax = plt.gca()
ax.set_xlabel('time (s)')
ax.set_ylabel(attribute.replace('_',' '))
plannerNames = []
for planner in planners:
cur.execute("""SELECT count(progress.%s) FROM progress INNER JOIN runs
ON progress.runid = runs.id AND runs.plannerid=%s
AND progress.%s IS NOT NULL""" \
% (attribute, planner[0], attribute))
if cur.fetchone()[0] > 0:
plannerNames.append(planner[1])
cur.execute("""SELECT DISTINCT progress.runid FROM progress INNER JOIN runs
WHERE progress.runid=runs.id AND runs.plannerid=?""", (planner[0],))
runids = [t[0] for t in cur.fetchall()]
timeTable = []
dataTable = []
for r in runids:
# Select data for given run
cur.execute('SELECT time, %s FROM progress WHERE runid = %s ORDER BY time' % (attribute,r))
(time, data) = zip(*(cur.fetchall()))
timeTable.append(time)
dataTable.append(data)
# It's conceivable that the sampling process may have
# generated more samples for one run than another; in this
# case, truncate all data series to length of shortest
# one.
fewestSamples = min(len(time[:]) for time in timeTable)
times = np.array(timeTable[0][:fewestSamples])
dataArrays = np.array([data[:fewestSamples] for data in dataTable])
filteredData = ma.masked_array(dataArrays, np.equal(dataArrays, None), dtype=float)
means = np.mean(filteredData, axis=0)
stddevs = np.std(filteredData, axis=0, ddof=1)
# plot average with error bars
plt.errorbar(times, means, yerr=2*stddevs, errorevery=max(1, len(times) // 20))
ax.legend(plannerNames)
if len(plannerNames)>0:
plt.show()
else:
plt.clf()
def plotStatistics(dbname, fname):
"""Create a PDF file with box plots for all attributes."""
print("Generating plots...")
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute('SELECT id, name FROM plannerConfigs')
planners = [(t[0],t[1].replace('geometric_','').replace('control_',''))
for t in c.fetchall()]
c.execute('PRAGMA table_info(runs)')
colInfo = c.fetchall()[3:]
pp = PdfPages(fname)
for col in colInfo:
if col[2] == 'BOOLEAN' or col[2] == 'ENUM' or \
col[2] == 'INTEGER' or col[2] == 'REAL':
plotAttribute(c, planners, col[1], col[2])
pp.savefig(plt.gcf())
c.execute('PRAGMA table_info(progress)')
colInfo = c.fetchall()[2:]
for col in colInfo:
plotProgressAttribute(c, planners, col[1])
pp.savefig(plt.gcf())
plt.clf()
pagey = 0.9
pagex = 0.06
c.execute("""SELECT id, name, timelimit, memorylimit FROM experiments""")
experiments = c.fetchall()
for experiment in experiments:
c.execute("""SELECT count(*) FROM runs WHERE runs.experimentid = %d
GROUP BY runs.plannerid""" % experiment[0])
numRuns = [run[0] for run in c.fetchall()]
        numRuns = numRuns[0] if len(set(numRuns)) == 1 else ','.join(str(r) for r in numRuns)
plt.figtext(pagex, pagey, 'Experiment "%s"' % experiment[1])
        plt.figtext(pagex, pagey-0.05, 'Number of averaged runs: %s' % numRuns)
plt.figtext(pagex, pagey-0.10, "Time limit per run: %g seconds" % experiment[2])
plt.figtext(pagex, pagey-0.15, "Memory limit per run: %g MB" % experiment[3])
pagey -= 0.22
plt.show()
pp.savefig(plt.gcf())
pp.close()
def saveAsMysql(dbname, mysqldump):
# See http://stackoverflow.com/questions/1067060/perl-to-python
import re
print("Saving as MySQL dump file...")
conn = sqlite3.connect(dbname)
mysqldump = open(mysqldump,'w')
    # make sure all tables are dropped in an order that keeps foreign keys valid
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table'")
table_names = [ str(t[0]) for t in c.fetchall() ]
c.close()
    last = ['experiments', 'plannerConfigs']
for table in table_names:
if table.startswith("sqlite"):
continue
if not table in last:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for table in last:
if table in table_names:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for line in conn.iterdump():
process = False
for nope in ('BEGIN TRANSACTION','COMMIT',
'sqlite_sequence','CREATE UNIQUE INDEX', 'CREATE VIEW'):
if nope in line: break
else:
process = True
if not process: continue
line = re.sub(r"[\n\r\t ]+", " ", line)
m = re.search('CREATE TABLE ([a-zA-Z0-9_]*)(.*)', line)
if m:
name, sub = m.groups()
sub = sub.replace('"','`')
line = '''CREATE TABLE IF NOT EXISTS %(name)s%(sub)s'''
line = line % dict(name=name, sub=sub)
# make sure we use an engine that supports foreign keys
line = line.rstrip("\n\t ;") + " ENGINE = InnoDB;\n"
else:
m = re.search('INSERT INTO "([a-zA-Z0-9_]*)"(.*)', line)
if m:
line = 'INSERT INTO %s%s\n' % m.groups()
line = line.replace('"', r'\"')
line = line.replace('"', "'")
line = re.sub(r"([^'])'t'(.)", "\\1THIS_IS_TRUE\\2", line)
line = line.replace('THIS_IS_TRUE', '1')
line = re.sub(r"([^'])'f'(.)", "\\1THIS_IS_FALSE\\2", line)
line = line.replace('THIS_IS_FALSE', '0')
line = line.replace('AUTOINCREMENT', 'AUTO_INCREMENT')
mysqldump.write(line)
mysqldump.close()
def computeViews(dbname):
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute('PRAGMA table_info(runs)')
    # kinodynamic paths cannot be simplified (or at least not easily),
# so simplification_time may not exist as a database column
if 'simplification_time' in [col[1] for col in c.fetchall()]:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, time + simplification_time AS total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
else:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, time AS total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
s1 = """SELECT plannerid, plannerName, experimentid, AVG(solved) AS avg_solved, AVG(total_time) AS avg_total_time
FROM (%s) GROUP BY plannerid, experimentid""" % s0
s2 = """SELECT plannerid, experimentid, MIN(avg_solved) AS avg_solved, avg_total_time
FROM (%s) GROUP BY plannerName, experimentid ORDER BY avg_solved DESC, avg_total_time ASC""" % s1
c.execute('DROP VIEW IF EXISTS bestPlannerConfigsPerExperiment')
c.execute('CREATE VIEW IF NOT EXISTS bestPlannerConfigsPerExperiment AS %s' % s2)
s1 = """SELECT plannerid, plannerName, AVG(solved) AS avg_solved, AVG(total_time) AS avg_total_time
FROM (%s) GROUP BY plannerid""" % s0
s2 = """SELECT plannerid, MIN(avg_solved) AS avg_solved, avg_total_time
FROM (%s) GROUP BY plannerName ORDER BY avg_solved DESC, avg_total_time ASC""" % s1
c.execute('DROP VIEW IF EXISTS bestPlannerConfigs')
c.execute('CREATE VIEW IF NOT EXISTS bestPlannerConfigs AS %s' % s2)
conn.commit()
c.close()
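# Illustrative query sketch (added for this write-up; not part of the original
# script): once computeViews() has run, the aggregated view can be inspected
# directly with plain SQL.
def _print_best_planners(dbname):
    conn = sqlite3.connect(dbname)
    c = conn.cursor()
    c.execute('SELECT plannerid, avg_solved, avg_total_time '
              'FROM bestPlannerConfigs')
    for row in c.fetchall():
        print('planner %s: solved %s, avg total time %s s' % row)
    c.close()
    conn.close()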
if __name__ == "__main__":
usage = """%prog [options] [<benchmark.log> ...]"""
parser = OptionParser("A script to parse benchmarking results.\n" + usage)
parser.add_option("-d", "--database", dest="dbname", default="benchmark.db",
help="Filename of benchmark database [default: %default]")
parser.add_option("-v", "--view", action="store_true", dest="view", default=False,
help="Compute the views for best planner configurations")
parser.add_option("-p", "--plot", dest="plot", default=None,
help="Create a PDF of plots with the filename provided")
parser.add_option("-m", "--mysql", dest="mysqldb", default=None,
help="Save SQLite3 database as a MySQL dump file")
(options, args) = parser.parse_args()
if len(args) == 0:
parser.error("No arguments were provided. Please provide full path of log file")
if len(args) == 1:
readBenchmarkLog(options.dbname, args)
# If we update the database, we recompute the views as well
options.view = True
if options.view:
computeViews(options.dbname)
if options.plot:
plotStatistics(options.dbname, options.plot)
if options.mysqldb:
saveAsMysql(options.dbname, options.mysqldb)
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/collections.py | 4 | 65176 | """
Classes for the efficient drawing of large collections of objects that
share most properties, e.g., a large number of line segments or
polygons.
The classes are not meant to be as flexible as their single element
counterparts (e.g., you may not be able to select all line styles) but
they are meant to be fast for common use cases (e.g., a large set of solid
line segments)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import zip
import warnings
import numpy as np
import numpy.ma as ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as mcolors
import matplotlib.cm as cm
from matplotlib import docstring
import matplotlib.transforms as transforms
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
from matplotlib import _path
import matplotlib.mlab as mlab
CIRCLE_AREA_FACTOR = 1.0 / np.sqrt(np.pi)
class Collection(artist.Artist, cm.ScalarMappable):
"""
Base class for Collections. Must be subclassed to be usable.
All properties in a collection must be sequences or scalars;
if scalars, they will be converted to sequences. The
property of the ith element of the collection is::
prop[i % len(props)]
Keyword arguments and default values:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *offset_position*: 'screen' (default) or 'data'
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *hatch*: None
* *zorder*: 1
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets). If offset_position is 'screen'
(default) the offset is applied after the master transform has
been applied, that is, the offsets are in screen coordinates. If
offset_position is 'data', the offset is applied before the master
transform, i.e., the offsets are in data coordinates.
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional. If
the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None
(i.e., a call to set_array has been made), at draw time a call to
scalar mappable will be made to set the face colors.
"""
_offsets = np.array([], np.float_)
    # _offsets must be an Nx2 array!
_offsets.shape = (0, 2)
_transOffset = transforms.IdentityTransform()
#: Either a list of 3x3 arrays or an Nx3x3 array of transforms, suitable
#: for the `all_transforms` argument to
#: :meth:`~matplotlib.backend_bases.RendererBase.draw_path_collection`;
#: each 3x3 array is used to initialize an
#: :class:`~matplotlib.transforms.Affine2D` object.
#: Each kind of collection defines this based on its arguments.
_transforms = np.empty((0, 3, 3))
def __init__(self,
edgecolors=None,
facecolors=None,
linewidths=None,
linestyles='solid',
antialiaseds=None,
offsets=None,
transOffset=None,
norm=None, # optional for ScalarMappable
cmap=None, # ditto
pickradius=5.0,
hatch=None,
urls=None,
offset_position='screen',
zorder=1,
**kwargs
):
"""
Create a Collection
%(Collection)s
"""
artist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
self.set_edgecolor(edgecolors)
self.set_facecolor(facecolors)
self.set_linewidth(linewidths)
self.set_linestyle(linestyles)
self.set_antialiased(antialiaseds)
self.set_pickradius(pickradius)
self.set_urls(urls)
self.set_hatch(hatch)
self.set_offset_position(offset_position)
self.set_zorder(zorder)
self._uniform_offsets = None
self._offsets = np.array([[0, 0]], np.float_)
if offsets is not None:
offsets = np.asanyarray(offsets)
offsets.shape = (-1, 2) # Make it Nx2
if transOffset is not None:
self._offsets = offsets
self._transOffset = transOffset
else:
self._uniform_offsets = offsets
self._path_effects = None
self.update(kwargs)
self._paths = None
@staticmethod
def _get_value(val):
try:
return (float(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try:
float(val[0])
except (TypeError, ValueError):
pass # raise below
else:
return val
raise TypeError('val must be a float or nonzero sequence of floats')
@staticmethod
def _get_bool(val):
if not cbook.iterable(val):
val = (val,)
try:
bool(val[0])
except (TypeError, IndexError):
raise TypeError('val must be a bool or nonzero sequence of them')
return val
def get_paths(self):
return self._paths
def set_paths(self):
raise NotImplementedError
def get_transforms(self):
return self._transforms
def get_offset_transform(self):
t = self._transOffset
if (not isinstance(t, transforms.Transform)
and hasattr(t, '_as_mpl_transform')):
t = t._as_mpl_transform(self.axes)
return t
def get_datalim(self, transData):
transform = self.get_transform()
transOffset = self.get_offset_transform()
offsets = self._offsets
paths = self.get_paths()
if not transform.is_affine:
paths = [transform.transform_path_non_affine(p) for p in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
offsets = np.asanyarray(offsets, np.float_)
if np.ma.isMaskedArray(offsets):
offsets = offsets.filled(np.nan)
# get_path_collection_extents handles nan but not masked arrays
offsets.shape = (-1, 2) # Make it Nx2
if len(paths) and len(offsets):
result = mpath.get_path_collection_extents(
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset.frozen())
result = result.inverse_transformed(transData)
else:
result = transforms.Bbox.null()
return result
def get_window_extent(self, renderer):
# TODO:check to ensure that this does not fail for
# cases other than scatter plot legend
return self.get_datalim(transforms.IdentityTransform())
def _prepare_points(self):
"""Point prep for drawing and hit testing"""
transform = self.get_transform()
transOffset = self.get_offset_transform()
offsets = self._offsets
paths = self.get_paths()
if self.have_units():
paths = []
for path in self.get_paths():
vertices = path.vertices
xs, ys = vertices[:, 0], vertices[:, 1]
xs = self.convert_xunits(xs)
ys = self.convert_yunits(ys)
paths.append(mpath.Path(list(zip(xs, ys)), path.codes))
if offsets.size > 0:
xs = self.convert_xunits(offsets[:, 0])
ys = self.convert_yunits(offsets[:, 1])
offsets = list(zip(xs, ys))
offsets = np.asanyarray(offsets, np.float_)
offsets.shape = (-1, 2) # Make it Nx2
if not transform.is_affine:
paths = [transform.transform_path_non_affine(path)
for path in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
# This might have changed an ndarray into a masked array.
transOffset = transOffset.get_affine()
if np.ma.isMaskedArray(offsets):
offsets = offsets.filled(np.nan)
# Changing from a masked array to nan-filled ndarray
# is probably most efficient at this point.
return transform, transOffset, offsets, paths
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group(self.__class__.__name__, self.get_gid())
self.update_scalarmappable()
transform, transOffset, offsets, paths = self._prepare_points()
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_snap(self.get_snap())
if self._hatch:
gc.set_hatch(self._hatch)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
# If the collection is made up of a single shape/color/stroke,
# it can be rendered once and blitted multiple times, using
# `draw_markers` rather than `draw_path_collection`. This is
# *much* faster for Agg, and results in smaller file sizes in
# PDF/SVG/PS.
trans = self.get_transforms()
facecolors = self.get_facecolor()
edgecolors = self.get_edgecolor()
do_single_path_optimization = False
if (len(paths) == 1 and len(trans) <= 1 and
len(facecolors) == 1 and len(edgecolors) == 1 and
len(self._linewidths) == 1 and
self._linestyles == [(None, None)] and
len(self._antialiaseds) == 1 and len(self._urls) == 1 and
self.get_hatch() is None):
if len(trans):
combined_transform = (transforms.Affine2D(trans[0]) +
transform)
else:
combined_transform = transform
extents = paths[0].get_extents(combined_transform)
width, height = renderer.get_canvas_width_height()
if (extents.width < width and
extents.height < height):
do_single_path_optimization = True
if do_single_path_optimization:
gc.set_foreground(tuple(edgecolors[0]))
gc.set_linewidth(self._linewidths[0])
gc.set_linestyle(self._linestyles[0])
gc.set_antialiased(self._antialiaseds[0])
gc.set_url(self._urls[0])
renderer.draw_markers(
gc, paths[0], combined_transform.frozen(),
mpath.Path(offsets), transOffset, tuple(facecolors[0]))
else:
renderer.draw_path_collection(
gc, transform.frozen(), paths,
self.get_transforms(), offsets, transOffset,
self.get_facecolor(), self.get_edgecolor(),
self._linewidths, self._linestyles,
self._antialiaseds, self._urls,
self._offset_position)
gc.restore()
renderer.close_group(self.__class__.__name__)
self.stale = False
def set_pickradius(self, pr):
self._pickradius = pr
def get_pickradius(self):
return self._pickradius
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the collection.
Returns True | False, ``dict(ind=itemlist)``, where every
item in itemlist contains the event.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
if not self.get_visible():
return False, {}
if self._picker is True: # the Boolean constant, not just nonzero or 1
pickradius = self._pickradius
else:
try:
pickradius = float(self._picker)
except TypeError:
# This should not happen if "contains" is called via
# pick, the normal route; the check is here in case
# it is called through some unanticipated route.
warnings.warn(
"Collection picker %s could not be converted to float"
% self._picker)
pickradius = self._pickradius
transform, transOffset, offsets, paths = self._prepare_points()
ind = _path.point_in_path_collection(
mouseevent.x, mouseevent.y, pickradius,
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset, pickradius <= 0,
self.get_offset_position())
return len(ind) > 0, dict(ind=ind)
def set_urls(self, urls):
if urls is None:
self._urls = [None, ]
else:
self._urls = urls
self.stale = True
def get_urls(self):
return self._urls
def set_hatch(self, hatch):
"""
Set the hatching pattern
*hatch* can be one of::
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
x - crossed diagonal
o - small circle
O - large circle
. - dots
* - stars
Letters can be combined, in which case all the specified
hatchings are done. If same letter repeats, it increases the
density of hatching of that pattern.
Hatching is supported in the PostScript, PDF, SVG and Agg
backends only.
Unlike other properties such as linewidth and colors, hatching
can only be specified for the collection as a whole, not separately
for each member.
ACCEPTS: [ '/' | '\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*' ]
"""
self._hatch = hatch
self.stale = True
def get_hatch(self):
'Return the current hatching pattern'
return self._hatch
def set_offsets(self, offsets):
"""
Set the offsets for the collection. *offsets* can be a scalar
or a sequence.
ACCEPTS: float or sequence of floats
"""
offsets = np.asanyarray(offsets, np.float_)
offsets.shape = (-1, 2) # Make it Nx2
#This decision is based on how they are initialized above
if self._uniform_offsets is None:
self._offsets = offsets
else:
self._uniform_offsets = offsets
self.stale = True
def get_offsets(self):
"""
Return the offsets for the collection.
"""
#This decision is based on how they are initialized above in __init__()
if self._uniform_offsets is None:
return self._offsets
else:
return self._uniform_offsets
def set_offset_position(self, offset_position):
"""
Set how offsets are applied. If *offset_position* is 'screen'
(default) the offset is applied after the master transform has
been applied, that is, the offsets are in screen coordinates.
If offset_position is 'data', the offset is applied before the
master transform, i.e., the offsets are in data coordinates.
"""
if offset_position not in ('screen', 'data'):
raise ValueError("offset_position must be 'screen' or 'data'")
self._offset_position = offset_position
self.stale = True
def get_offset_position(self):
"""
Returns how offsets are applied for the collection. If
*offset_position* is 'screen', the offset is applied after the
master transform has been applied, that is, the offsets are in
screen coordinates. If offset_position is 'data', the offset
is applied before the master transform, i.e., the offsets are
in data coordinates.
"""
return self._offset_position
def set_linewidth(self, lw):
"""
Set the linewidth(s) for the collection. *lw* can be a scalar
or a sequence; if it is a sequence the patches will cycle
through the sequence
ACCEPTS: float or sequence of floats
"""
if lw is None:
lw = mpl.rcParams['patch.linewidth']
self._linewidths = self._get_value(lw)
self.stale = True
def set_linewidths(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the linestyle(s) for the collection.
=========================== =================
linestyle description
=========================== =================
``'-'`` or ``'solid'`` solid line
``'--'`` or ``'dashed'`` dashed line
``'-.'`` or ``'dash_dot'`` dash-dotted line
``':'`` or ``'dotted'`` dotted line
=========================== =================
Alternatively a dash tuple of the following form can be provided::
(offset, onoffseq),
where ``onoffseq`` is an even length tuple of on and off ink
in points.
ACCEPTS: ['solid' | 'dashed', 'dashdot', 'dotted' |
(offset, on-off-dash-seq) |
``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` |
``' '`` | ``''``]
Parameters
----------
ls : { '-', '--', '-.', ':'} and more see description
The line style.
"""
try:
dashd = backend_bases.GraphicsContextBase.dashd
if cbook.is_string_like(ls):
ls = cbook.ls_mapper.get(ls, ls)
if ls in dashd:
dashes = [dashd[ls]]
else:
raise ValueError()
elif cbook.iterable(ls):
try:
dashes = []
for x in ls:
if cbook.is_string_like(x):
x = cbook.ls_mapper.get(x, x)
if x in dashd:
dashes.append(dashd[x])
else:
raise ValueError()
elif cbook.iterable(x) and len(x) == 2:
dashes.append(x)
else:
raise ValueError()
except ValueError:
if len(ls) == 2:
dashes = [ls]
else:
raise ValueError()
else:
raise ValueError()
except ValueError:
raise ValueError('Do not know how to convert %s to dashes' % ls)
self._linestyles = dashes
self.stale = True
def set_linestyles(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_dashes(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_antialiased(self, aa):
"""
Set the antialiasing state for rendering.
ACCEPTS: Boolean or sequence of booleans
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiaseds = self._get_bool(aa)
self.stale = True
def set_antialiaseds(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
For setting the edge or face color individually.
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_facecolor(self, c):
"""
Set the facecolor(s) of the collection. *c* can be a
matplotlib color spec (all patches have same color), or a
sequence of specs; if it is a sequence the patches will
cycle through the sequence.
If *c* is 'none', the patch will not be filled.
ACCEPTS: matplotlib color spec or sequence of specs
"""
self._is_filled = True
try:
if c.lower() == 'none':
self._is_filled = False
except AttributeError:
pass
if c is None:
c = mpl.rcParams['patch.facecolor']
self._facecolors_original = c
self._facecolors = mcolors.colorConverter.to_rgba_array(c, self._alpha)
self.stale = True
def set_facecolors(self, c):
"""alias for set_facecolor"""
return self.set_facecolor(c)
def get_facecolor(self):
return self._facecolors
get_facecolors = get_facecolor
def get_edgecolor(self):
if (isinstance(self._edgecolors, six.string_types)
and self._edgecolors == str('face')):
return self.get_facecolors()
else:
return self._edgecolors
get_edgecolors = get_edgecolor
def set_edgecolor(self, c):
"""
Set the edgecolor(s) of the collection. *c* can be a
matplotlib color spec (all patches have same color), or a
sequence of specs; if it is a sequence the patches will
cycle through the sequence.
If *c* is 'face', the edge color will always be the same as
the face color. If it is 'none', the patch boundary will not
be drawn.
ACCEPTS: matplotlib color spec or sequence of specs
"""
self._is_stroked = True
try:
if c.lower() == 'none':
self._is_stroked = False
except AttributeError:
pass
try:
if c.lower() == 'face':
self._edgecolors = 'face'
self._edgecolors_original = 'face'
return
except AttributeError:
pass
if c is None:
c = mpl.rcParams['patch.edgecolor']
self._edgecolors_original = c
self._edgecolors = mcolors.colorConverter.to_rgba_array(c, self._alpha)
self.stale = True
def set_edgecolors(self, c):
"""alias for set_edgecolor"""
return self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
        Set the alpha transparencies of the collection. *alpha* must be
a float or *None*.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = mcolors.colorConverter.to_rgba_array(
self._facecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
if (not isinstance(self._edgecolors_original, six.string_types)
or self._edgecolors_original != str('face')):
self._edgecolors = mcolors.colorConverter.to_rgba_array(
self._edgecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
def get_linewidths(self):
return self._linewidths
get_linewidth = get_linewidths
def get_linestyles(self):
return self._linestyles
get_dashes = get_linestyle = get_linestyles
def update_scalarmappable(self):
"""
        If the scalar mappable array is not None, update colors
from scalar data
"""
if self._A is None:
return
if self._A.ndim > 1:
raise ValueError('Collections can only map rank 1 arrays')
if not self.check_update("array"):
return
if self._is_filled:
self._facecolors = self.to_rgba(self._A, self._alpha)
elif self._is_stroked:
self._edgecolors = self.to_rgba(self._A, self._alpha)
self.stale = True
def get_fill(self):
'return whether fill is set'
return self._is_filled
def update_from(self, other):
'copy properties from other to self'
artist.Artist.update_from(self, other)
self._antialiaseds = other._antialiaseds
self._edgecolors_original = other._edgecolors_original
self._edgecolors = other._edgecolors
self._facecolors_original = other._facecolors_original
self._facecolors = other._facecolors
self._linewidths = other._linewidths
self._linestyles = other._linestyles
self._pickradius = other._pickradius
self._hatch = other._hatch
# update_from for scalarmappable
self._A = other._A
self.norm = other.norm
self.cmap = other.cmap
# self.update_dict = other.update_dict # do we need to copy this? -JJL
self.stale = True
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object defn
docstring.interpd.update(Collection="""\
Valid Collection keyword arguments:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets)
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
""")
class _CollectionWithSizes(Collection):
"""
Base class for collections that have an array of sizes.
"""
_factor = 1.0
def get_sizes(self):
"""
Returns the sizes of the elements in the collection. The
value represents the 'area' of the element.
Returns
-------
sizes : array
The 'area' of each element.
"""
return self._sizes
def set_sizes(self, sizes, dpi=72.0):
"""
Set the sizes of each member of the collection.
Parameters
----------
sizes : ndarray or None
The size to set for each element of the collection. The
value is the 'area' of the element.
dpi : float
The dpi of the canvas. Defaults to 72.0.
"""
if sizes is None:
self._sizes = np.array([])
self._transforms = np.empty((0, 3, 3))
else:
self._sizes = np.asarray(sizes)
self._transforms = np.zeros((len(self._sizes), 3, 3))
scale = np.sqrt(self._sizes) * dpi / 72.0 * self._factor
self._transforms[:, 0, 0] = scale
self._transforms[:, 1, 1] = scale
self._transforms[:, 2, 2] = 1.0
self.stale = True
@allow_rasterization
def draw(self, renderer):
self.set_sizes(self._sizes, self.figure.dpi)
Collection.draw(self, renderer)
class PathCollection(_CollectionWithSizes):
"""
This is the most basic :class:`Collection` subclass.
"""
@docstring.dedent_interpd
def __init__(self, paths, sizes=None, **kwargs):
"""
*paths* is a sequence of :class:`matplotlib.path.Path`
instances.
%(Collection)s
"""
Collection.__init__(self, **kwargs)
self.set_paths(paths)
self.set_sizes(sizes)
self.stale = True
def set_paths(self, paths):
self._paths = paths
self.stale = True
def get_paths(self):
return self._paths
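# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal, hedged example
# of building a PathCollection from Path objects, with *sizes* interpreted as
# areas in points^2 and *offsets* placed in data coordinates.  The helper name
# ``_demo_path_collection`` is hypothetical and the function is never called
# at import time.
def _demo_path_collection():
    import matplotlib.pyplot as plt
    import matplotlib.path as _mpath
    import matplotlib.transforms as _mtransforms

    star = _mpath.Path.unit_regular_star(5)
    fig, ax = plt.subplots()
    pc = PathCollection([star], sizes=(300,),
                        offsets=[(0.25, 0.25), (0.75, 0.75)],
                        transOffset=ax.transData,
                        facecolors=[(0.2, 0.4, 0.8, 1.0)])
    # Keep the paths in points (as scatter does); only offsets use transData.
    pc.set_transform(_mtransforms.IdentityTransform())
    ax.add_collection(pc)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    return fig
# ---------------------------------------------------------------------------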
class PolyCollection(_CollectionWithSizes):
@docstring.dedent_interpd
def __init__(self, verts, sizes=None, closed=True, **kwargs):
"""
*verts* is a sequence of ( *verts0*, *verts1*, ...) where
*verts_i* is a sequence of *xy* tuples of vertices, or an
equivalent :mod:`numpy` array of shape (*nv*, 2).
*sizes* is *None* (default) or a sequence of floats that
scale the corresponding *verts_i*. The scaling is applied
before the Artist master transform; if the latter is an identity
transform, then the overall scaling is such that if
*verts_i* specify a unit square, then *sizes_i* is the area
of that square in points^2.
If len(*sizes*) < *nv*, the additional values will be
taken cyclically from the array.
*closed*, when *True*, will explicitly close the polygon.
%(Collection)s
"""
Collection.__init__(self, **kwargs)
self.set_sizes(sizes)
self.set_verts(verts, closed)
self.stale = True
def set_verts(self, verts, closed=True):
'''This allows one to delay initialization of the vertices.'''
if np.ma.isMaskedArray(verts):
verts = verts.astype(np.float_).filled(np.nan)
# This is much faster than having Path do it one at a time.
if closed:
self._paths = []
for xy in verts:
if len(xy):
if np.ma.isMaskedArray(xy):
xy = np.ma.concatenate([xy, xy[0:1]])
else:
xy = np.asarray(xy)
xy = np.concatenate([xy, xy[0:1]])
codes = np.empty(xy.shape[0], dtype=mpath.Path.code_type)
codes[:] = mpath.Path.LINETO
codes[0] = mpath.Path.MOVETO
codes[-1] = mpath.Path.CLOSEPOLY
self._paths.append(mpath.Path(xy, codes))
else:
self._paths.append(mpath.Path(xy))
else:
self._paths = [mpath.Path(xy) for xy in verts]
self.stale = True
set_paths = set_verts
def set_verts_and_codes(self, verts, codes):
'''This allows one to initialize vertices with path codes.'''
if (len(verts) != len(codes)):
raise ValueError("'codes' must be a 1D list or array "
"with the same length of 'verts'")
self._paths = []
for xy, cds in zip(verts, codes):
if len(xy):
self._paths.append(mpath.Path(xy, cds))
else:
self._paths.append(mpath.Path(xy))
self.stale = True
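# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal, hedged example
# of the *verts* format accepted by PolyCollection -- one sequence of (x, y)
# tuples (or an (nv, 2) array) per polygon.  ``_demo_poly_collection`` is a
# hypothetical helper, never called at import time.
def _demo_poly_collection():
    import numpy as np
    import matplotlib.pyplot as plt

    verts = [
        [(0.1, 0.1), (0.4, 0.1), (0.4, 0.4)],                        # triangle
        np.array([[0.6, 0.6], [0.9, 0.6], [0.9, 0.9], [0.6, 0.9]]),  # square
    ]
    fig, ax = plt.subplots()
    ax.add_collection(PolyCollection(verts, closed=True,
                                     facecolors=['red', 'green'],
                                     edgecolors='black'))
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    return fig
# ---------------------------------------------------------------------------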
class BrokenBarHCollection(PolyCollection):
"""
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
"""
@docstring.dedent_interpd
def __init__(self, xranges, yrange, **kwargs):
"""
*xranges*
sequence of (*xmin*, *xwidth*)
*yrange*
*ymin*, *ywidth*
%(Collection)s
"""
ymin, ywidth = yrange
ymax = ymin + ywidth
verts = [[(xmin, ymin),
(xmin, ymax),
(xmin + xwidth, ymax),
(xmin + xwidth, ymin),
(xmin, ymin)] for xmin, xwidth in xranges]
PolyCollection.__init__(self, verts, **kwargs)
@staticmethod
def span_where(x, ymin, ymax, where, **kwargs):
"""
Create a BrokenBarHCollection to plot horizontal bars from
over the regions in *x* where *where* is True. The bars range
on the y-axis from *ymin* to *ymax*
A :class:`BrokenBarHCollection` is returned. *kwargs* are
passed on to the collection.
"""
xranges = []
for ind0, ind1 in mlab.contiguous_regions(where):
xslice = x[ind0:ind1]
if not len(xslice):
continue
xranges.append((xslice[0], xslice[-1] - xslice[0]))
collection = BrokenBarHCollection(
xranges, [ymin, ymax - ymin], **kwargs)
return collection
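# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a hedged usage example of
# ``span_where`` that shades the intervals where a condition holds.  The
# helper ``_demo_span_where`` is hypothetical and never called at import time.
def _demo_span_where():
    import numpy as np
    import matplotlib.pyplot as plt

    x = np.linspace(0, 4 * np.pi, 200)
    y = np.sin(x)
    fig, ax = plt.subplots()
    ax.plot(x, y, color='black')
    # Highlight the x-intervals where the curve is positive.
    coll = BrokenBarHCollection.span_where(x, ymin=-1.1, ymax=1.1,
                                           where=y > 0,
                                           facecolor='green', alpha=0.3)
    ax.add_collection(coll)
    return fig
# ---------------------------------------------------------------------------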
class RegularPolyCollection(_CollectionWithSizes):
"""Draw a collection of regular polygons with *numsides*."""
_path_generator = mpath.Path.unit_regular_polygon
_factor = CIRCLE_AREA_FACTOR
@docstring.dedent_interpd
def __init__(self,
numsides,
rotation=0,
sizes=(1,),
**kwargs):
"""
*numsides*
the number of sides of the polygon
*rotation*
the rotation of the polygon in radians
*sizes*
gives the area of the circle circumscribing the
regular polygon in points^2
%(Collection)s
Example: see :file:`examples/dynamic_collection.py` for
complete example::
offsets = np.random.rand(20,2)
facecolors = [cm.jet(x) for x in np.random.rand(20)]
black = (0,0,0,1)
collection = RegularPolyCollection(
numsides=5, # a pentagon
rotation=0, sizes=(50,),
facecolors = facecolors,
edgecolors = (black,),
linewidths = (1,),
offsets = offsets,
transOffset = ax.transData,
)
"""
Collection.__init__(self, **kwargs)
self.set_sizes(sizes)
self._numsides = numsides
self._paths = [self._path_generator(numsides)]
self._rotation = rotation
self.set_transform(transforms.IdentityTransform())
def get_numsides(self):
return self._numsides
def get_rotation(self):
return self._rotation
@allow_rasterization
def draw(self, renderer):
self.set_sizes(self._sizes, self.figure.dpi)
self._transforms = [
transforms.Affine2D(x).rotate(-self._rotation).get_matrix()
for x in self._transforms
]
Collection.draw(self, renderer)
class StarPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular stars with *numsides* points."""
_path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular asterisks with *numsides* points."""
_path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
"""
All parameters must be sequences or scalars; if scalars, they will
be converted to sequences. The property of the ith line
segment is::
prop[i % len(props)]
i.e., the properties cycle if the ``len`` of props is less than the
number of segments.
"""
def __init__(self, segments, # Can be None.
linewidths=None,
colors=None,
antialiaseds=None,
linestyles='solid',
offsets=None,
transOffset=None,
norm=None,
cmap=None,
pickradius=5,
zorder=2,
facecolors='none',
**kwargs
):
"""
*segments*
a sequence of (*line0*, *line1*, *line2*), where::
linen = (x0, y0), (x1, y1), ... (xm, ym)
or the equivalent numpy array with two columns. Each line
can be a different length.
*colors*
            must be a sequence of RGBA tuples (arbitrary color
            strings, etc., are not allowed).
*antialiaseds*
must be a sequence of ones or zeros
*linestyles* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
a string or dash tuple. The dash tuple is::
(offset, onoffseq),
where *onoffseq* is an even length tuple of on and off ink
in points.
If *linewidths*, *colors*, or *antialiaseds* is None, they
default to their rcParams setting, in sequence form.
If *offsets* and *transOffset* are not None, then
*offsets* are transformed by *transOffset* and applied after
the segments have been transformed to display coordinates.
If *offsets* is not None but *transOffset* is None, then the
*offsets* are added to the segments before any transformation.
In this case, a single offset can be specified as::
offsets=(xo,yo)
and this value will be added cumulatively to each successive
segment, so as to produce a set of successively offset curves.
*norm*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*cmap*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*pickradius* is the tolerance for mouse clicks picking a line.
The default is 5 pt.
*zorder*
The zorder of the LineCollection. Default is 2
*facecolors*
The facecolors of the LineCollection. Default is 'none'
Setting to a value other than 'none' will lead to a filled
polygon being drawn between points on each line.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` array
:attr:`~matplotlib.cm.ScalarMappable._A` is not None (i.e., a call to
:meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
draw time a call to scalar mappable will be made to set the colors.
"""
if colors is None:
colors = mpl.rcParams['lines.color']
if linewidths is None:
linewidths = (mpl.rcParams['lines.linewidth'],)
if antialiaseds is None:
antialiaseds = (mpl.rcParams['lines.antialiased'],)
colors = mcolors.colorConverter.to_rgba_array(colors)
Collection.__init__(
self,
edgecolors=colors,
facecolors=facecolors,
linewidths=linewidths,
linestyles=linestyles,
antialiaseds=antialiaseds,
offsets=offsets,
transOffset=transOffset,
norm=norm,
cmap=cmap,
pickradius=pickradius,
zorder=zorder,
**kwargs)
self.set_segments(segments)
def set_segments(self, segments):
if segments is None:
return
_segments = []
for seg in segments:
if not np.ma.isMaskedArray(seg):
seg = np.asarray(seg, np.float_)
_segments.append(seg)
if self._uniform_offsets is not None:
_segments = self._add_offsets(_segments)
self._paths = [mpath.Path(_seg) for _seg in _segments]
self.stale = True
set_verts = set_segments # for compatibility with PolyCollection
set_paths = set_segments
def get_segments(self):
segments = []
for path in self._paths:
vertices = [vertex for vertex, _ in path.iter_segments()]
vertices = np.asarray(vertices)
segments.append(vertices)
return segments
def _add_offsets(self, segs):
offsets = self._uniform_offsets
Nsegs = len(segs)
Noffs = offsets.shape[0]
if Noffs == 1:
for i in range(Nsegs):
segs[i] = segs[i] + i * offsets
else:
for i in range(Nsegs):
io = i % Noffs
segs[i] = segs[i] + offsets[io:io + 1]
return segs
def set_color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence or rgba tuples; if it is a sequence the patches will
cycle through the sequence.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
self.set_edgecolor(c)
self.stale = True
def get_color(self):
return self._edgecolors
get_colors = get_color # for compatibility with old versions
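# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a hedged example of the
# *segments* format -- one (m, 2) array of vertices per line -- and of mapping
# the lines through a colormap via ``set_array``.  ``_demo_line_collection``
# is a hypothetical helper, never called at import time.
def _demo_line_collection():
    import numpy as np
    import matplotlib.pyplot as plt

    x = np.linspace(0, 1, 50)
    segments = [np.column_stack([x, np.sin(2 * np.pi * x) + i])
                for i in range(4)]
    fig, ax = plt.subplots()
    lc = LineCollection(segments, linewidths=(1, 2, 3, 4), linestyles='solid')
    lc.set_array(np.arange(4))          # one scalar per line, colormapped
    ax.add_collection(lc)
    ax.set_xlim(0, 1)
    ax.set_ylim(-1.5, 4.5)
    return fig
# ---------------------------------------------------------------------------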
class EventCollection(LineCollection):
'''
A collection of discrete events.
An event is a 1-dimensional value, usually the position of something along
an axis, such as time or length. Events do not have an amplitude. They
    are displayed as short parallel lines at the given positions.
'''
def __init__(self,
positions, # Can be None.
orientation=None,
lineoffset=0,
linelength=1,
linewidth=None,
color=None,
linestyle='solid',
antialiased=None,
**kwargs
):
"""
*positions*
a sequence of numerical values or a 1D numpy array. Can be None
*orientation* [ 'horizontal' | 'vertical' | None ]
defaults to 'horizontal' if not specified or None
*lineoffset*
a single numerical value, corresponding to the offset of the center
of the markers from the origin
*linelength*
a single numerical value, corresponding to the total height of the
marker (i.e. the marker stretches from lineoffset+linelength/2 to
lineoffset-linelength/2). Defaults to 1
*linewidth*
a single numerical value
*color*
            must be a sequence of RGBA tuples (arbitrary color
            strings, etc., are not allowed).
*linestyle* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
*antialiased*
1 or 2
If *linewidth*, *color*, or *antialiased* is None, they
default to their rcParams setting, in sequence form.
*norm*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*cmap*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*pickradius* is the tolerance for mouse clicks picking a line.
The default is 5 pt.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` array
:attr:`~matplotlib.cm.ScalarMappable._A` is not None (i.e., a call to
:meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
draw time a call to scalar mappable will be made to set the colors.
**Example:**
.. plot:: mpl_examples/pylab_examples/eventcollection_demo.py
"""
segment = (lineoffset + linelength / 2.,
lineoffset - linelength / 2.)
if len(positions) == 0:
segments = []
elif hasattr(positions, 'ndim') and positions.ndim > 1:
            raise ValueError('if positions is an ndarray it cannot have '
                             'dimensionality greater than 1')
elif (orientation is None or orientation.lower() == 'none' or
orientation.lower() == 'horizontal'):
positions.sort()
segments = [[(coord1, coord2) for coord2 in segment] for
coord1 in positions]
self._is_horizontal = True
elif orientation.lower() == 'vertical':
positions.sort()
segments = [[(coord2, coord1) for coord2 in segment] for
coord1 in positions]
self._is_horizontal = False
else:
raise ValueError("orientation must be 'horizontal' or 'vertical'")
LineCollection.__init__(self,
segments,
linewidths=linewidth,
colors=color,
antialiaseds=antialiased,
linestyles=linestyle,
**kwargs)
self._linelength = linelength
self._lineoffset = lineoffset
def get_positions(self):
'''
return an array containing the floating-point values of the positions
'''
segments = self.get_segments()
pos = 0 if self.is_horizontal() else 1
positions = []
for segment in segments:
positions.append(segment[0, pos])
return positions
def set_positions(self, positions):
'''
set the positions of the events to the specified value
'''
if positions is None or (hasattr(positions, 'len') and
len(positions) == 0):
self.set_segments([])
return
lineoffset = self.get_lineoffset()
linelength = self.get_linelength()
segment = (lineoffset + linelength / 2.,
lineoffset - linelength / 2.)
positions = np.asanyarray(positions)
positions.sort()
if self.is_horizontal():
segments = [[(coord1, coord2) for coord2 in segment] for
coord1 in positions]
else:
segments = [[(coord2, coord1) for coord2 in segment] for
coord1 in positions]
self.set_segments(segments)
def add_positions(self, position):
'''
add one or more events at the specified positions
'''
if position is None or (hasattr(position, 'len') and
len(position) == 0):
return
positions = self.get_positions()
positions = np.hstack([positions, np.asanyarray(position)])
self.set_positions(positions)
extend_positions = append_positions = add_positions
def is_horizontal(self):
'''
True if the eventcollection is horizontal, False if vertical
'''
return self._is_horizontal
def get_orientation(self):
'''
get the orientation of the event line, may be:
[ 'horizontal' | 'vertical' ]
'''
return 'horizontal' if self.is_horizontal() else 'vertical'
def switch_orientation(self):
'''
switch the orientation of the event line, either from vertical to
        horizontal or vice versa
'''
segments = self.get_segments()
for i, segment in enumerate(segments):
segments[i] = np.fliplr(segment)
self.set_segments(segments)
self._is_horizontal = not self.is_horizontal()
self.stale = True
def set_orientation(self, orientation=None):
'''
set the orientation of the event line
[ 'horizontal' | 'vertical' | None ]
defaults to 'horizontal' if not specified or None
'''
if (orientation is None or orientation.lower() == 'none' or
orientation.lower() == 'horizontal'):
is_horizontal = True
elif orientation.lower() == 'vertical':
is_horizontal = False
else:
raise ValueError("orientation must be 'horizontal' or 'vertical'")
if is_horizontal == self.is_horizontal():
return
self.switch_orientation()
def get_linelength(self):
'''
get the length of the lines used to mark each event
'''
return self._linelength
def set_linelength(self, linelength):
'''
set the length of the lines used to mark each event
'''
if linelength == self.get_linelength():
return
lineoffset = self.get_lineoffset()
segments = self.get_segments()
pos = 1 if self.is_horizontal() else 0
for segment in segments:
segment[0, pos] = lineoffset + linelength / 2.
segment[1, pos] = lineoffset - linelength / 2.
self.set_segments(segments)
self._linelength = linelength
def get_lineoffset(self):
'''
get the offset of the lines used to mark each event
'''
return self._lineoffset
def set_lineoffset(self, lineoffset):
'''
set the offset of the lines used to mark each event
'''
if lineoffset == self.get_lineoffset():
return
linelength = self.get_linelength()
segments = self.get_segments()
pos = 1 if self.is_horizontal() else 0
for segment in segments:
segment[0, pos] = lineoffset + linelength / 2.
segment[1, pos] = lineoffset - linelength / 2.
self.set_segments(segments)
self._lineoffset = lineoffset
def get_linewidth(self):
'''
get the width of the lines used to mark each event
'''
return self.get_linewidths()[0]
def get_linestyle(self):
'''
get the style of the lines used to mark each event
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
'''
return self.get_linestyles()
def get_color(self):
'''
get the color of the lines used to mark each event
'''
return self.get_colors()[0]
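# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a hedged example of
# marking event positions along the x-axis, following the docstring above
# (RGBA tuples for *color*).  ``_demo_event_collection`` is a hypothetical
# helper, never called at import time.
def _demo_event_collection():
    import numpy as np
    import matplotlib.pyplot as plt

    rng = np.random.RandomState(0)
    positions = rng.uniform(0, 10, 25)
    fig, ax = plt.subplots()
    ec = EventCollection(positions, orientation='horizontal',
                         lineoffset=0.5, linelength=0.4,
                         color=[(1.0, 0.0, 0.0, 1.0)])
    ax.add_collection(ec)
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 1)
    return fig
# ---------------------------------------------------------------------------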
class CircleCollection(_CollectionWithSizes):
"""
A collection of circles, drawn using splines.
"""
_factor = CIRCLE_AREA_FACTOR
@docstring.dedent_interpd
def __init__(self, sizes, **kwargs):
"""
*sizes*
Gives the area of the circle in points^2
%(Collection)s
"""
Collection.__init__(self, **kwargs)
self.set_sizes(sizes)
self.set_transform(transforms.IdentityTransform())
self._paths = [mpath.Path.unit_circle()]
class EllipseCollection(Collection):
"""
A collection of ellipses, drawn using splines.
"""
@docstring.dedent_interpd
def __init__(self, widths, heights, angles, units='points', **kwargs):
"""
*widths*: sequence
lengths of first axes (e.g., major axis lengths)
*heights*: sequence
lengths of second axes
*angles*: sequence
angles of first axes, degrees CCW from the X-axis
*units*: ['points' | 'inches' | 'dots' | 'width' | 'height'
| 'x' | 'y' | 'xy']
units in which majors and minors are given; 'width' and
'height' refer to the dimensions of the axes, while 'x'
and 'y' refer to the *offsets* data units. 'xy' differs
from all others in that the angle as plotted varies with
the aspect ratio, and equals the specified angle only when
the aspect ratio is unity. Hence it behaves the same as
the :class:`~matplotlib.patches.Ellipse` with
axes.transData as its transform.
Additional kwargs inherited from the base :class:`Collection`:
%(Collection)s
"""
Collection.__init__(self, **kwargs)
self._widths = 0.5 * np.asarray(widths).ravel()
self._heights = 0.5 * np.asarray(heights).ravel()
self._angles = np.asarray(angles).ravel() * (np.pi / 180.0)
self._units = units
self.set_transform(transforms.IdentityTransform())
self._transforms = np.empty((0, 3, 3))
self._paths = [mpath.Path.unit_circle()]
def _set_transforms(self):
"""
Calculate transforms immediately before drawing.
"""
ax = self.axes
fig = self.figure
if self._units == 'xy':
sc = 1
elif self._units == 'x':
sc = ax.bbox.width / ax.viewLim.width
elif self._units == 'y':
sc = ax.bbox.height / ax.viewLim.height
elif self._units == 'inches':
sc = fig.dpi
elif self._units == 'points':
sc = fig.dpi / 72.0
elif self._units == 'width':
sc = ax.bbox.width
elif self._units == 'height':
sc = ax.bbox.height
elif self._units == 'dots':
sc = 1.0
else:
raise ValueError('unrecognized units: %s' % self._units)
self._transforms = np.zeros((len(self._widths), 3, 3))
widths = self._widths * sc
heights = self._heights * sc
sin_angle = np.sin(self._angles)
cos_angle = np.cos(self._angles)
self._transforms[:, 0, 0] = widths * cos_angle
self._transforms[:, 0, 1] = heights * -sin_angle
self._transforms[:, 1, 0] = widths * sin_angle
self._transforms[:, 1, 1] = heights * cos_angle
self._transforms[:, 2, 2] = 1.0
_affine = transforms.Affine2D
if self._units == 'xy':
m = ax.transData.get_affine().get_matrix().copy()
m[:2, 2:] = 0
self.set_transform(_affine(m))
@allow_rasterization
def draw(self, renderer):
self._set_transforms()
Collection.draw(self, renderer)
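# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a hedged example of an
# EllipseCollection with sizes given in x data units (``units='x'``) and
# centers supplied through *offsets*/*transOffset*.
# ``_demo_ellipse_collection`` is a hypothetical helper, never called at
# import time.
def _demo_ellipse_collection():
    import numpy as np
    import matplotlib.pyplot as plt

    rng = np.random.RandomState(0)
    n = 10
    offsets = rng.uniform(0, 10, (n, 2))
    widths = rng.uniform(0.5, 2.0, n)
    heights = rng.uniform(0.5, 2.0, n)
    angles = rng.uniform(0, 180, n)
    fig, ax = plt.subplots()
    ec = EllipseCollection(widths, heights, angles, units='x',
                           offsets=offsets, transOffset=ax.transData,
                           facecolors=[(0.2, 0.4, 0.8, 0.5)])
    ax.add_collection(ec)
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 10)
    return fig
# ---------------------------------------------------------------------------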
class PatchCollection(Collection):
"""
A generic collection of patches.
This makes it easier to assign a color map to a heterogeneous
collection of patches.
This also may improve plotting speed, since PatchCollection will
draw faster than a large number of patches.
"""
def __init__(self, patches, match_original=False, **kwargs):
"""
*patches*
a sequence of Patch objects. This list may include
a heterogeneous assortment of different patch types.
*match_original*
If True, use the colors and linewidths of the original
patches. If False, new colors may be assigned by
providing the standard collection arguments, facecolor,
edgecolor, linewidths, norm or cmap.
If any of *edgecolors*, *facecolors*, *linewidths*,
*antialiaseds* are None, they default to their
:data:`matplotlib.rcParams` patch setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not
None (i.e., a call to set_array has been made), at draw time a
call to scalar mappable will be made to set the face colors.
"""
if match_original:
def determine_facecolor(patch):
if patch.get_fill():
return patch.get_facecolor()
return [0, 0, 0, 0]
kwargs['facecolors'] = [determine_facecolor(p) for p in patches]
kwargs['edgecolors'] = [p.get_edgecolor() for p in patches]
kwargs['linewidths'] = [p.get_linewidth() for p in patches]
kwargs['linestyles'] = [p.get_linestyle() for p in patches]
kwargs['antialiaseds'] = [p.get_antialiased() for p in patches]
Collection.__init__(self, **kwargs)
self.set_paths(patches)
def set_paths(self, patches):
paths = [p.get_transform().transform_path(p.get_path())
for p in patches]
self._paths = paths
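# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a hedged example of
# colormapping a heterogeneous set of patches through a single
# PatchCollection.  ``_demo_patch_collection`` is a hypothetical helper,
# never called at import time.
def _demo_patch_collection():
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.patches import Circle, Rectangle

    patches = [Circle((0.3, 0.3), 0.2), Rectangle((0.55, 0.55), 0.3, 0.2)]
    fig, ax = plt.subplots()
    pc = PatchCollection(patches, cmap=plt.cm.jet, alpha=0.8)
    pc.set_array(np.array([10.0, 40.0]))    # one scalar per patch
    ax.add_collection(pc)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    return fig
# ---------------------------------------------------------------------------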
class TriMesh(Collection):
"""
Class for the efficient drawing of a triangular mesh using
Gouraud shading.
A triangular mesh is a :class:`~matplotlib.tri.Triangulation`
object.
"""
def __init__(self, triangulation, **kwargs):
Collection.__init__(self, **kwargs)
self._triangulation = triangulation
self._shading = 'gouraud'
self._is_filled = True
self._bbox = transforms.Bbox.unit()
# Unfortunately this requires a copy, unless Triangulation
# was rewritten.
xy = np.hstack((triangulation.x.reshape(-1, 1),
triangulation.y.reshape(-1, 1)))
self._bbox.update_from_data_xy(xy)
def get_paths(self):
if self._paths is None:
self.set_paths()
return self._paths
def set_paths(self):
self._paths = self.convert_mesh_to_paths(self._triangulation)
@staticmethod
def convert_mesh_to_paths(tri):
"""
Converts a given mesh into a sequence of
:class:`matplotlib.path.Path` objects for easier rendering by
backends that do not directly support meshes.
This function is primarily of use to backend implementers.
"""
Path = mpath.Path
triangles = tri.get_masked_triangles()
verts = np.concatenate((tri.x[triangles][..., np.newaxis],
tri.y[triangles][..., np.newaxis]), axis=2)
return [Path(x) for x in verts]
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group(self.__class__.__name__)
transform = self.get_transform()
# Get a list of triangles and the color at each vertex.
tri = self._triangulation
triangles = tri.get_masked_triangles()
verts = np.concatenate((tri.x[triangles][..., np.newaxis],
tri.y[triangles][..., np.newaxis]), axis=2)
self.update_scalarmappable()
colors = self._facecolors[triangles]
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_linewidth(self.get_linewidth()[0])
renderer.draw_gouraud_triangles(gc, verts, colors, transform.frozen())
gc.restore()
renderer.close_group(self.__class__.__name__)
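# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a hedged example of
# Gouraud-shading a Triangulation with TriMesh, one scalar value per vertex.
# ``_demo_tri_mesh`` is a hypothetical helper, never called at import time.
def _demo_tri_mesh():
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.tri as mtri

    rng = np.random.RandomState(0)
    x, y = rng.rand(2, 30)
    triang = mtri.Triangulation(x, y)
    fig, ax = plt.subplots()
    tm = TriMesh(triang)
    tm.set_array(x + y)                 # per-vertex values, Gouraud-shaded
    ax.add_collection(tm)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    return fig
# ---------------------------------------------------------------------------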
class QuadMesh(Collection):
"""
Class for the efficient drawing of a quadrilateral mesh.
A quadrilateral mesh consists of a grid of vertices. The
dimensions of this array are (*meshWidth* + 1, *meshHeight* +
1). Each vertex in the mesh has a different set of "mesh
coordinates" representing its position in the topology of the
mesh. For any values (*m*, *n*) such that 0 <= *m* <= *meshWidth*
and 0 <= *n* <= *meshHeight*, the vertices at mesh coordinates
(*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
form one of the quadrilaterals in the mesh. There are thus
(*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh
need not be regular and the polygons need not be convex.
A quadrilateral mesh is represented by a (2 x ((*meshWidth* + 1) *
(*meshHeight* + 1))) numpy array *coordinates*, where each row is
the *x* and *y* coordinates of one of the vertices. To define the
function that maps from a data point to its corresponding color,
use the :meth:`set_cmap` method. Each of these arrays is indexed in
row-major order by the mesh coordinates of the vertex (or the mesh
coordinates of the lower left vertex, in the case of the
colors).
For example, the first entry in *coordinates* is the
coordinates of the vertex at mesh coordinates (0, 0), then the one
at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and
so on.
*shading* may be 'flat', or 'gouraud'
"""
def __init__(self, meshWidth, meshHeight, coordinates,
antialiased=True, shading='flat', **kwargs):
Collection.__init__(self, **kwargs)
self._meshWidth = meshWidth
self._meshHeight = meshHeight
self._coordinates = coordinates
self._antialiased = antialiased
self._shading = shading
self._bbox = transforms.Bbox.unit()
self._bbox.update_from_data_xy(coordinates.reshape(
((meshWidth + 1) * (meshHeight + 1), 2)))
# By converting to floats now, we can avoid that on every draw.
self._coordinates = self._coordinates.reshape(
(meshHeight + 1, meshWidth + 1, 2))
self._coordinates = np.array(self._coordinates, np.float_)
def get_paths(self):
if self._paths is None:
self.set_paths()
return self._paths
def set_paths(self):
self._paths = self.convert_mesh_to_paths(
self._meshWidth, self._meshHeight, self._coordinates)
self.stale = True
def get_datalim(self, transData):
return (self.get_transform() - transData).transform_bbox(self._bbox)
@staticmethod
def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
"""
Converts a given mesh into a sequence of
:class:`matplotlib.path.Path` objects for easier rendering by
backends that do not directly support quadmeshes.
This function is primarily of use to backend implementers.
"""
Path = mpath.Path
if ma.isMaskedArray(coordinates):
c = coordinates.data
else:
c = coordinates
points = np.concatenate((
c[0:-1, 0:-1],
c[0:-1, 1:],
c[1:, 1:],
c[1:, 0:-1],
c[0:-1, 0:-1]
), axis=2)
points = points.reshape((meshWidth * meshHeight, 5, 2))
return [Path(x) for x in points]
def convert_mesh_to_triangles(self, meshWidth, meshHeight, coordinates):
"""
Converts a given mesh into a sequence of triangles, each point
with its own color. This is useful for experiments using
        `draw_gouraud_triangle`.
"""
if ma.isMaskedArray(coordinates):
p = coordinates.data
else:
p = coordinates
p_a = p[:-1, :-1]
p_b = p[:-1, 1:]
p_c = p[1:, 1:]
p_d = p[1:, :-1]
p_center = (p_a + p_b + p_c + p_d) / 4.0
triangles = np.concatenate((
p_a, p_b, p_center,
p_b, p_c, p_center,
p_c, p_d, p_center,
p_d, p_a, p_center,
), axis=2)
triangles = triangles.reshape((meshWidth * meshHeight * 4, 3, 2))
c = self.get_facecolor().reshape((meshHeight + 1, meshWidth + 1, 4))
c_a = c[:-1, :-1]
c_b = c[:-1, 1:]
c_c = c[1:, 1:]
c_d = c[1:, :-1]
c_center = (c_a + c_b + c_c + c_d) / 4.0
colors = np.concatenate((
c_a, c_b, c_center,
c_b, c_c, c_center,
c_c, c_d, c_center,
c_d, c_a, c_center,
), axis=2)
colors = colors.reshape((meshWidth * meshHeight * 4, 3, 4))
return triangles, colors
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group(self.__class__.__name__, self.get_gid())
transform = self.get_transform()
transOffset = self.get_offset_transform()
offsets = self._offsets
if self.have_units():
if len(self._offsets):
xs = self.convert_xunits(self._offsets[:, 0])
ys = self.convert_yunits(self._offsets[:, 1])
offsets = list(zip(xs, ys))
offsets = np.asarray(offsets, np.float_)
offsets.shape = (-1, 2) # Make it Nx2
self.update_scalarmappable()
if not transform.is_affine:
coordinates = self._coordinates.reshape(
(self._coordinates.shape[0] *
self._coordinates.shape[1],
2))
coordinates = transform.transform(coordinates)
coordinates = coordinates.reshape(self._coordinates.shape)
transform = transforms.IdentityTransform()
else:
coordinates = self._coordinates
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_linewidth(self.get_linewidth()[0])
if self._shading == 'gouraud':
triangles, colors = self.convert_mesh_to_triangles(
self._meshWidth, self._meshHeight, coordinates)
renderer.draw_gouraud_triangles(
gc, triangles, colors, transform.frozen())
else:
renderer.draw_quad_mesh(
gc, transform.frozen(), self._meshWidth, self._meshHeight,
coordinates, offsets, transOffset, self.get_facecolor(),
self._antialiased, self.get_edgecolors())
gc.restore()
renderer.close_group(self.__class__.__name__)
self.stale = False
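# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a hedged example of the
# *coordinates* layout described in the QuadMesh docstring -- one (x, y) pair
# per vertex in row-major mesh order -- with one scalar value per
# quadrilateral for flat shading.  ``_demo_quad_mesh`` is a hypothetical
# helper, never called at import time.
def _demo_quad_mesh():
    import numpy as np
    import matplotlib.pyplot as plt

    nx, ny = 4, 3                       # meshWidth, meshHeight
    xx, yy = np.meshgrid(np.linspace(0, 1, nx + 1), np.linspace(0, 1, ny + 1))
    coordinates = np.dstack([xx, yy]).reshape(-1, 2)
    fig, ax = plt.subplots()
    qm = QuadMesh(nx, ny, coordinates)
    qm.set_array(np.arange(nx * ny, dtype=float))   # one value per quad
    ax.add_collection(qm)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    return fig
# ---------------------------------------------------------------------------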
patchstr = artist.kwdoc(Collection)
for k in ('QuadMesh', 'TriMesh', 'PolyCollection', 'BrokenBarHCollection',
'RegularPolyCollection', 'PathCollection',
'StarPolygonCollection', 'PatchCollection',
'CircleCollection', 'Collection',):
docstring.interpd.update({k: patchstr})
docstring.interpd.update(LineCollection=artist.kwdoc(LineCollection))
| mit |
mikebenfield/scikit-learn | sklearn/tests/test_common.py | 39 | 6031 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import re
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import _named_check
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, cloneable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield (_named_check(check_parameters_default_constructible, name),
name, Estimator)
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield _named_check(check, name), name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if ('class_weight' in clazz().get_params().keys() and
issubclass(clazz, LinearClassifierMixin))]
for name, Classifier in linear_classifiers:
yield _named_check(check_class_weight_balanced_linear_classifier,
name), name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_all_tests_are_importable():
# Ensure that for each contentful subpackage, there is a test directory
# within it that is also a subpackage (i.e. a directory with __init__.py)
HAS_TESTS_EXCEPTIONS = re.compile(r'''(?x)
\.externals(\.|$)|
\.tests(\.|$)|
\._
''')
lookup = dict((name, ispkg)
for _, name, ispkg
in pkgutil.walk_packages(sklearn.__path__,
prefix='sklearn.'))
missing_tests = [name for name, ispkg in lookup.items()
if ispkg
and not HAS_TESTS_EXCEPTIONS.search(name)
and name + '.tests' not in lookup]
assert_equal(missing_tests, [],
'{0} do not have `tests` subpackages. Perhaps they require '
'__init__.py or an add_subpackage directive in the parent '
'setup.py'.format(missing_tests))
| bsd-3-clause |
vermouthmjl/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
rustychris/stompy | test/test_paver.py | 1 | 12226 | import os
import logging
logging.basicConfig(level=logging.INFO)
import numpy as np
import matplotlib.pyplot as plt
from stompy.grid import paver
from stompy.spatial.linestring_utils import upsample_linearring,resample_linearring
from stompy.grid import paver
from stompy.spatial import field,constrained_delaunay,wkb2shp
##
from stompy.grid import exact_delaunay
from stompy.grid import live_dt
from stompy.grid import paver
try:
    reload  # Python 2: reload is a builtin
except NameError:
    from importlib import reload  # Python 3.4+
reload(exact_delaunay)
reload(live_dt)
reload(paver)
##
def test_basic():
# Define a polygon
boundary=np.array([[0,0],[1000,0],[1000,1000],[0,1000]])
island =np.array([[200,200],[600,200],[200,600]])
rings=[boundary,island]
# And the scale:
scale=field.ConstantField(50)
p=paver.Paving(rings=rings,density=scale)
p.pave_all()
##
def test_basic_apollo():
# Define a polygon
boundary=np.array([[0,0],[1000,0],[1000,1000],[0,1000]])
island =np.array([[200,200],[600,200],[200,600]])
rings=[boundary,island]
# And the scale:
scale=field.PyApolloniusField()
scale.insert([50,50],20)
p=paver.Paving(rings=rings,density=scale)
p.pave_all()
return p
##
# A circle - r = 100, C=628, n_points = 628
def test_circle():
r = 100
thetas = np.linspace(0,2*np.pi,200)[:-1]
circle = np.zeros((len(thetas),2),np.float64)
circle[:,0] = r*np.cos(thetas)
circle[:,1] = r*np.sin(thetas)
class CircleDensityField(field.Field):
# horizontally varying, from 5 to 20
def value(self,X):
X = np.array(X)
return 5 + 15 * (X[...,0] + 100) / 200.0
density = CircleDensityField()
p=paver.Paving(circle,density,label='circle')
p.pave_all()
def test_long_channel():
l = 2000
w = 50
long_channel = np.array([[0,0],
[l,0],
[l,w],
[0,w]], np.float64 )
density = field.ConstantField( 19.245 )
p=paver.Paving(long_channel,density)
p.pave_all()
def test_long_channel_rigid():
l = 2000
w = 50
long_channel = np.array([[0,0],
[l,0],
[l,w],
[0,w]], np.float64 )
density = field.ConstantField( 19.245 )
p=paver.Paving(long_channel,density,initial_node_status=paver.Paving.RIGID)
p.pave_all()
def test_narrow_channel():
l = 1000
w = 50
long_channel = np.array([[0,0],
[l,0.375*w],
[l,0.625*w],
[0,w]], np.float64 )
density = field.ConstantField( w/np.sin(60*np.pi/180.) / 4 )
p=paver.Paving(long_channel,density)
p.pave_all()
def test_small_island():
l = 100
square = np.array([[0,0],
[l,0],
[l,l],
[0,l]], np.float64 )
r=10
theta = np.linspace(0,2*np.pi,30)
circle = r/np.sqrt(2) * np.swapaxes( np.array([np.cos(theta), np.sin(theta)]), 0,1)
island1 = circle + np.array([45,45])
island2 = circle + np.array([65,65])
island3 = circle + np.array([20,80])
rings = [square,island1,island2,island3]
density = field.ConstantField( 10 )
p=paver.Paving(rings,density)
p.pave_all()
def test_tight_peanut():
r = 100
thetas = np.linspace(0,2*np.pi,300)
peanut = np.zeros( (len(thetas),2), np.float64)
x = r*np.cos(thetas)
y = r*np.sin(thetas) * (0.9/10000 * x*x + 0.05)
peanut[:,0] = x
peanut[:,1] = y
density = field.ConstantField( 6.0 )
p=paver.Paving(peanut,density,label='tight_peanut')
p.pave_all()
def test_tight_with_island():
# build a peanut first:
r = 100
thetas = np.linspace(0,2*np.pi,250)
peanut = np.zeros( (len(thetas),2), np.float64)
x = r*np.cos(thetas)
y = r*np.sin(thetas) * (0.9/10000 * x*x + 0.05)
peanut[:,0] = x
peanut[:,1] = y
# put two holes into it
thetas = np.linspace(0,2*np.pi,30)
hole1 = np.zeros( (len(thetas),2), np.float64)
hole1[:,0] = 10*np.cos(thetas) - 75
hole1[:,1] = 10*np.sin(thetas)
hole2 = np.zeros( (len(thetas),2), np.float64)
hole2[:,0] = 20*np.cos(thetas) + 75
hole2[:,1] = 20*np.sin(thetas)
rings = [peanut,hole1,hole2]
density = field.ConstantField( 6.0 )
p=paver.Paving(rings,density,label='tight_with_island')
p.pave_all()
def test_peninsula():
r = 100
thetas = np.linspace(0,2*np.pi,1000)
pen = np.zeros( (len(thetas),2), np.float64)
pen[:,0] = r*(0.2+ np.abs(np.sin(2*thetas))**0.2)*np.cos(thetas)
pen[:,1] = r*(0.2+ np.abs(np.sin(2*thetas))**0.2)*np.sin(thetas)
density = field.ConstantField( 10.0 )
pen2 = upsample_linearring(pen,density)
p=paver.Paving(pen2,density,label='peninsula')
p.pave_all()
def test_peanut():
# like a figure 8, or a peanut
r = 100
thetas = np.linspace(0,2*np.pi,1000)
peanut = np.zeros( (len(thetas),2), np.float64)
peanut[:,0] = r*(0.5+0.3*np.cos(2*thetas))*np.cos(thetas)
peanut[:,1] = r*(0.5+0.3*np.cos(2*thetas))*np.sin(thetas)
min_pnt = peanut.min(axis=0)
max_pnt = peanut.max(axis=0)
d_data = np.array([ [min_pnt[0],min_pnt[1], 1.5],
[min_pnt[0],max_pnt[1], 1.5],
[max_pnt[0],min_pnt[1], 8],
[max_pnt[0],max_pnt[1], 8]])
density = field.XYZField(X=d_data[:,:2],F=d_data[:,2])
p=paver.Paving(peanut,density)
p.pave_all()
def test_cul_de_sac():
r=5
theta = np.linspace(-np.pi/2,np.pi/2,20)
cap = r * np.swapaxes( np.array([np.cos(theta), np.sin(theta)]), 0,1)
box = np.array([ [-3*r,r],
[-4*r,-r] ])
ring = np.concatenate((box,cap))
density = field.ConstantField(2*r/(np.sqrt(3)/2))
p=paver.Paving(ring,density,label='cul_de_sac')
p.pave_all()
def test_bow():
x = np.linspace(-100,100,50)
# with /1000 it seems to do okay
# with /500 it still looks okay
y = x**2 / 250.0
bow = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
height = np.array([0,20])
ring = np.concatenate( (bow+height,bow[::-1]-height) )
density = field.ConstantField(2)
p=paver.Paving(ring,density,label='bow')
p.pave_all()
def test_ngon(nsides=7):
# hexagon works ok, though a bit of perturbation
# septagon starts to show expansion issues, but never pronounced
# octagon - works fine.
theta = np.linspace(0,2*np.pi,nsides+1)[:-1]
r=100
x = r*np.cos(theta)
y = r*np.sin(theta)
poly = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
density = field.ConstantField(6)
p=paver.Paving(poly,density,label='ngon%02d'%nsides)
p.pave_all()
def test_expansion():
# 40: too close to a 120deg angle - always bisect on centerline
# 30: rows alternate with wall and bisect seams
# 35: starts to diverge, but recovers.
# 37: too close to 120.
d = 36
pnts = np.array([[0.,0.],
[100,-d],
[200,0],
[200,100],
[100,100+d],
[0,100]])
density = field.ConstantField(6)
p=paver.Paving([pnts],density,label='expansion')
p.pave_all()
def test_embedded_channel():
# trying out degenerate internal lines - the trick may be mostly in
# how to specify them.
# make a large rectangle, with a sinuous channel in the middle
L = 500.0
W = 300.0
rect = np.array([[0,0],
[L,0],
[L,W],
[0,W]])
x = np.linspace(0.1*L,0.9*L,50)
y = W/2 + 0.1*W*np.cos(4*np.pi*x/L)
shore = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
density = field.ConstantField(10)
# this will probably get moved into Paver itself.
# Note closed_ring=0 !
shore = resample_linearring(shore,density,closed_ring=0)
south_shore = shore - np.array([0,0.1*W])
north_shore = shore + np.array([0,0.1*W])
p=paver.Paving([rect],density,degenerates=[north_shore,south_shore])
p.pave_all()
# dumbarton...
def test_dumbarton():
shp=os.path.join( os.path.dirname(__file__), 'data','dumbarton.shp')
features=wkb2shp.shp2geom(shp)
geom = features['geom'][0]
dumbarton = np.array(geom.exterior)
density = field.ConstantField(250.0)
p=paver.Paving(dumbarton, density,label='dumbarton')
p.pave_all()
# #def log_spiral_channel():
# t = linspace(1.0,12*pi,200)
# a = 1 ; b = 0.1
# x = a*exp(b*t)*cos(t)
# y = a*exp(b*t)*sin(t)
# # each 2*pi, the radius gets bigger by exp(2pi*b)
# x2 = a*exp(b*t-b*pi)*cos(t)
# y2 = a*exp(b*t-b*pi)*sin(t)
# cla(); plot(x,y,'b',x2,y2,'r')
##
# This is going to require a fair bit of porting --
# hmm - maybe better just to have a sinusoid channel, then perturb it
# and put some islands in there. having a wide range of scales looks
# nice but isn't going to be a great test.
def gen_sine_sine():
t = np.linspace(1.0,12*np.pi,400)
x1 = 100*t
y1 = 200*np.sin(t)
# each 2*pi, the radius gets bigger by exp(2pi*b)
x2 = x1
y2 = y1+50
# now perturb both sides, but keep amplitude < 20
y1 = y1 + 20*np.sin(10*t)
y2 = y2 + 10*np.cos(5*t)
x = np.concatenate( (x1,x2[::-1]) )
y = np.concatenate( (y1,y2[::-1]) )
shore = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
rings = [shore]
# and make some islands:
north_island_shore = 0.4*y1 + 0.6*y2
south_island_shore = 0.6*y1 + 0.4*y2
Nislands = 20
# islands same length as space between islands, so divide
# island shorelines into 2*Nislands blocks
for i in range(Nislands):
i_start = int( (2*i+0.5)*len(t)/(2*Nislands) )
i_stop = int( (2*i+1.5)*len(t)/(2*Nislands) )
north_y = north_island_shore[i_start:i_stop]
south_y = south_island_shore[i_start:i_stop]
north_x = x1[i_start:i_stop]
south_x = x2[i_start:i_stop]
x = np.concatenate( (north_x,south_x[::-1]) )
y = np.concatenate( (north_y,south_y[::-1]) )
island = np.swapaxes( np.concatenate( (x[None,:],y[None,:]) ), 0,1)
rings.append(island)
density = field.ConstantField(25.0)
min_density = field.ConstantField(2.0)
p = paver.Paving(rings,density=density,min_density=min_density)
print("Smoothing to nominal 1.0m")
# mostly just to make sure that long segments are
# sampled well relative to the local feature scale.
p.smooth()
print("Adjusting other densities to local feature size")
p.telescope_rate=1.1
p.adjust_density_by_apollonius()
return p
def test_sine_sine():
p=gen_sine_sine()
p.pave_all()
if 0:
# debugging the issue with sine_sine()
# fails deep inside here, step 512
# lots of crap coming from this one, too.
# at some point, dt_incident_constraints reports only 1 constraint,
# but it should have two, which happens because of a bad slide.
# Tricky to guard against -
# Several avenues to fix this:
# 1. Make the resample_neighbors code (which I'm pretty sure is the culprit)
# more cautious and willing to accept a local maximum in distance instead
# of shoving a node far away. This is a nice but incomplete solution.
# 2. The resample code, which I think is responsible for adding the new node
# that screwed it all up, should check for self-intersections
# this is probably the appropriate thing to do.
# test_sine_sine()
p=gen_sine_sine()
p.pave_all(n_steps=512)
##
p.verbose=3
p.pave_all(n_steps=513)
##
zoom=plt.axis()
plt.figure(1).clf()
p.plot()
p.plot_boundary()
plt.axis('equal')
plt.axis(zoom)
##
# Step 510 really takes the end off an island
# yep.
p.pave_all(n_steps=512)
##
# node is 3626
# to_remove: an edge with nodes 5374, 3626
# pnt2edges: [3626, 5915]
# part of the problem is that there is some sliding around
# at the beginning of step 512 that really wreaks havoc on
# what was already a dicey node.
p.plot_nodes([3626,5374])
| mit |
bsipocz/statsmodels | statsmodels/sandbox/tsa/varma.py | 33 | 5032 | '''VAR and VARMA process
this doesn't actually do much, trying out a version for a time loop
alternative representation:
* textbook, different blocks in matrices
* Kalman filter
* VAR, VARX and ARX could be calculated with signal.lfilter
only tried some examples, not implemented
TODO: try minimizing sum of squares of (Y-Yhat)
Note: the filter has the smallest lag at the end of the array and the largest
lag at the beginning; be careful with asymmetric lag coefficients.
Check again that this convention is used consistently.
changes
2009-09-08 : separated from movstat.py
Author : josefpkt
License : BSD
'''
from __future__ import print_function
import numpy as np
from scipy import signal
#import matplotlib.pylab as plt
from numpy.testing import assert_array_equal, assert_array_almost_equal
#NOTE: this just returns that predicted values given the
#B matrix in polynomial form.
#TODO: make sure VAR class returns B/params in this form.
def VAR(x,B, const=0):
''' multivariate linear filter
Parameters
----------
x: (TxK) array
columns are variables, rows are observations for time period
B: (PxKxK) array
b_t-1 is bottom "row", b_t-P is top "row" when printing
B(:,:,0) is lag polynomial matrix for variable 1
B(:,:,k) is lag polynomial matrix for variable k
B(p,:,k) is pth lag for variable k
B[p,:,:].T corresponds to A_p in Wikipedia
const: float or array (not tested)
constant added to autoregression
Returns
-------
xhat: (TxK) array
filtered, predicted values of x array
Notes
-----
xhat(t,i) = sum{_p}sum{_k} { x(t-P:t,:) .* B(:,:,i) } for all i = 0,K-1, for all t=p..T
xhat does not include the forecasting observation, xhat(T+1),
xhat is 1 row shorter than signal.correlate
References
----------
http://en.wikipedia.org/wiki/Vector_Autoregression
http://en.wikipedia.org/wiki/General_matrix_notation_of_a_VAR(p)
'''
p = B.shape[0]
T = x.shape[0]
xhat = np.zeros(x.shape)
for t in range(p,T): #[p+2]:#
## print(p,T)
## print(x[t-p:t,:,np.newaxis].shape)
## print(B.shape)
#print(x[t-p:t,:,np.newaxis])
xhat[t,:] = const + (x[t-p:t,:,np.newaxis]*B).sum(axis=1).sum(axis=0)
return xhat
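# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): the __main__ block below
# exercises VAR in detail; this hedged helper only restates the call with
# explicit array shapes.  ``_demo_var`` is a hypothetical name, never called
# at import time.
def _demo_var():
    T, K, P = 30, 2, 3
    x = np.random.randn(T, K)            # (T, K): rows are time steps
    B = 0.1 * np.ones((P, K, K))         # (P, K, K): lag polynomial matrices
    xhat = VAR(x, B, const=0.5)          # predicted values, same shape as x
    return xhat
# ---------------------------------------------------------------------------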
def VARMA(x,B,C, const=0):
''' multivariate linear filter
x (TxK)
B (PxKxK)
xhat(t,i) = sum{_p}sum{_k} { x(t-P:t,:) .* B(:,:,i) } +
sum{_q}sum{_k} { e(t-Q:t,:) .* C(:,:,i) }for all i = 0,K-1
'''
P = B.shape[0]
Q = C.shape[0]
T = x.shape[0]
xhat = np.zeros(x.shape)
e = np.zeros(x.shape)
start = max(P,Q)
for t in range(start,T): #[p+2]:#
##        print(p,T)
##        print(x[t-p:t,:,np.newaxis].shape)
##        print(B.shape)
        #print(x[t-p:t,:,np.newaxis])
xhat[t,:] = const + (x[t-P:t,:,np.newaxis]*B).sum(axis=1).sum(axis=0) + \
(e[t-Q:t,:,np.newaxis]*C).sum(axis=1).sum(axis=0)
e[t,:] = x[t,:] - xhat[t,:]
return xhat, e
if __name__ == '__main__':
T = 20
K = 2
P = 3
#x = np.arange(10).reshape(5,2)
x = np.column_stack([np.arange(T)]*K)
B = np.ones((P,K,K))
#B[:,:,1] = 2
B[:,:,1] = [[0,0],[0,0],[0,1]]
xhat = VAR(x,B)
print(np.all(xhat[P:,0]==np.correlate(x[:-1,0],np.ones(P))*2))
#print(xhat)
T = 20
K = 2
Q = 2
P = 3
const = 1
#x = np.arange(10).reshape(5,2)
x = np.column_stack([np.arange(T)]*K)
B = np.ones((P,K,K))
#B[:,:,1] = 2
B[:,:,1] = [[0,0],[0,0],[0,1]]
C = np.zeros((Q,K,K))
xhat1 = VAR(x,B, const=const)
xhat2, err2 = VARMA(x,B,C, const=const)
print(np.all(xhat2 == xhat1))
print(np.all(xhat2[P:,0] == np.correlate(x[:-1,0],np.ones(P))*2+const))
C[1,1,1] = 0.5
xhat3, err3 = VARMA(x,B,C)
x = np.r_[np.zeros((P,K)),x] #prepend inital conditions
xhat4, err4 = VARMA(x,B,C)
C[1,1,1] = 1
B[:,:,1] = [[0,0],[0,0],[0,1]]
xhat5, err5 = VARMA(x,B,C)
#print(err5)
#in differences
#VARMA(np.diff(x,axis=0),B,C)
#Note:
# * signal correlate applies same filter to all columns if kernel.shape[1]<K
# e.g. signal.correlate(x0,np.ones((3,1)),'valid')
# * if kernel.shape[1]==K, then `valid` produces a single column
# -> possible to run signal.correlate K times with different filters,
# see the following example, which replicates VAR filter
x0 = np.column_stack([np.arange(T), 2*np.arange(T)])
B[:,:,0] = np.ones((P,K))
B[:,:,1] = np.ones((P,K))
B[1,1,1] = 0
xhat0 = VAR(x0,B)
xcorr00 = signal.correlate(x0,B[:,:,0])#[:,0]
xcorr01 = signal.correlate(x0,B[:,:,1])
print(np.all(signal.correlate(x0,B[:,:,0],'valid')[:-1,0]==xhat0[P:,0]))
print(np.all(signal.correlate(x0,B[:,:,1],'valid')[:-1,0]==xhat0[P:,1]))
#import error
#from movstat import acovf, acf
from statsmodels.tsa.stattools import acovf, acf
aav = acovf(x[:,0])
print(aav[0] == np.var(x[:,0]))
aac = acf(x[:,0])
| bsd-3-clause |
jmargeta/scikit-learn | sklearn/decomposition/dict_learning.py | 4 | 40462 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD
import time
import sys
import itertools
from math import sqrt, floor, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import array2d, check_random_state, gen_even_slices
from ..utils.extmath import randomized_svd
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
    code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,
max_iter=max_iter)
clf.fit(dictionary.T, X.T, Xy=cov, coef_init=init)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
norms_squared = np.sum((X ** 2), axis=1)
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
norms_squared, copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary' * X
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
    If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = array2d(dictionary)
X = array2d(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None:
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = max(n_features / 10, 1)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
return _sparse_encode(X, dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init, max_iter=max_iter)
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, n_jobs))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
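# A minimal usage sketch for sparse_encode (illustrative only): the dictionary
# below is random and unnormalized, so the result has no particular meaning
# beyond demonstrating the call signature and output shape.
def _example_sparse_encode():
    rng = np.random.RandomState(0)
    dictionary = rng.randn(8, 20)        # (n_components, n_features)
    Y = rng.randn(5, 20)                 # (n_samples, n_features)
    code = sparse_encode(Y, dictionary, algorithm='omp', n_nonzero_coefs=3)
    # code has shape (n_samples, n_components); Y ~= np.dot(code, dictionary)
    return code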
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
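# A minimal numpy sketch of the rank-1 residual bookkeeping used in
# _update_dict: BLAS ger(alpha, u, v, a=R) computes R + alpha * outer(u, v).
# The loop re-adds atom k's contribution to the residual, refits the atom
# against that residual, renormalizes it and subtracts its contribution again.
# Shapes and values here are arbitrary.
def _example_rank_one_update():
    rng = np.random.RandomState(0)
    u = rng.randn(6)                     # one dictionary atom (n_features,)
    v = rng.randn(10)                    # its code row (n_samples,)
    R = rng.randn(6, 10)                 # residual Y - np.dot(dictionary, code)
    R_plus = R + np.outer(u, v)          # same effect as ger(1.0, u, v, a=R)
    new_atom = np.dot(R_plus, v)         # refit the atom from the residual
    new_atom /= np.sqrt(np.dot(new_atom, new_atom))   # unit l2 norm
    R_new = R_plus - np.outer(new_atom, v)            # ger(-1.0, ...)
    return new_atom, R_new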
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
return code, dictionary, errors
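# A minimal usage sketch for dict_learning (illustrative only): the data is
# random and n_components, alpha and max_iter are arbitrary small values
# chosen so the call is cheap, not recommended settings.
def _example_dict_learning():
    rng = np.random.RandomState(0)
    Y = rng.randn(30, 16)                # (n_samples, n_features)
    code, dictionary, errors = dict_learning(Y, n_components=8, alpha=1.0,
                                             max_iter=20, method='lars',
                                             random_state=rng)
    # Y ~= np.dot(code, dictionary); errors holds the cost at each iteration.
    return code, dictionary, errors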
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : int,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
        raise ValueError('Coding method %r not supported as a fit algorithm.'
                         % method)
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
dictionary = np.ascontiguousarray(dictionary.T)
if verbose == 1:
print('[dict_learning]', end=' ')
    n_batches = int(floor(float(len(X)) / batch_size))
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
batches = np.array_split(X_train, n_batches)
batches = itertools.cycle(batches)
# The covariance of the dictionary
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
for ii, this_X in zip(range(iter_offset, iter_offset + n_iter), batches):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
return code, dictionary.T
return dictionary.T
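# A minimal usage sketch for dict_learning_online (illustrative only):
# batch_size, n_iter and alpha are arbitrary values demonstrating the
# signature; with return_code=False only the dictionary would be returned.
def _example_dict_learning_online():
    rng = np.random.RandomState(0)
    Y = rng.randn(100, 16)
    code, dictionary = dict_learning_online(Y, n_components=8, alpha=1.0,
                                            n_iter=50, batch_size=5,
                                            random_state=rng)
    # code: (n_samples, n_components), dictionary: (n_components, n_features)
    return code, dictionary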
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
# XXX : kwargs is not documented
X = array2d(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
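# A small numpy sketch of the split_sign post-processing performed in
# SparseCodingMixin.transform: the code matrix is rewritten as the
# concatenation of its positive part and the magnitude of its negative part,
# doubling the number of output features.
def _example_split_sign():
    code = np.array([[1.5, -2.0, 0.0]])
    n_samples, n_features = code.shape
    split_code = np.empty((n_samples, 2 * n_features))
    split_code[:, :n_features] = np.maximum(code, 0)    # positive part
    split_code[:, n_features:] = -np.minimum(code, 0)   # |negative part|
    # split_code == [[1.5, 0.0, 0.0, 0.0, 2.0, 0.0]]
    return split_code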
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Parameters
----------
dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. Rows are assumed to be
        normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
`components_` : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
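# A minimal usage sketch for SparseCoder (illustrative only): the dictionary
# is random but row-normalized, as the class documentation assumes.
def _example_sparse_coder():
    rng = np.random.RandomState(0)
    D = rng.randn(10, 32)
    D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]   # unit-norm atoms
    coder = SparseCoder(dictionary=D, transform_algorithm='omp',
                        transform_n_nonzero_coefs=3)
    X = rng.randn(4, 32)
    code = coder.transform(X)            # shape (n_samples, n_components)
    return code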
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : int,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
`components_` : array, [n_components, n_features]
dictionary atoms extracted from the data
`error_` : array
vector of errors at each iteration
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = array2d(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E = dict_learning(X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state)
self.components_ = U
self.error_ = E
return self
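# A minimal usage sketch for DictionaryLearning (illustrative only): the
# parameters are small arbitrary values so the example runs quickly, not
# recommended defaults for real data.
def _example_dictionary_learning():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 16)
    dico = DictionaryLearning(n_components=8, alpha=1.0, max_iter=10,
                              fit_algorithm='lars', transform_algorithm='omp',
                              transform_n_nonzero_coefs=3, random_state=0)
    code = dico.fit(X).transform(X)
    # dico.components_ has shape (n_components, n_features)
    return code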
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : int,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
`components_` : array, [n_components, n_features]
components extracted from the data
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = array2d(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U = dict_learning_online(X, n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
dict_init=self.dict_init,
batch_size=self.batch_size,
shuffle=self.shuffle, verbose=self.verbose,
random_state=random_state)
self.components_ = U
return self
def partial_fit(self, X, y=None, iter_offset=0):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
        if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = array2d(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
U = dict_learning_online(X, self.n_components, self.alpha,
n_iter=self.n_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset,
random_state=self.random_state_)
self.components_ = U
return self
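# A minimal sketch of incremental fitting with partial_fit (illustrative
# only): each call updates the dictionary with one mini-batch, and iter_offset
# lets the per-iteration weighting continue from where the previous call
# stopped.
def _example_minibatch_partial_fit():
    rng = np.random.RandomState(0)
    dico = MiniBatchDictionaryLearning(n_components=8, alpha=1.0, n_iter=1)
    for batch_index in range(10):
        batch = rng.randn(5, 16)
        dico.partial_fit(batch, iter_offset=batch_index)
    return dico.components_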
| bsd-3-clause |
lpenguin/pandas-qt | pandasqt/views/EditDialogs.py | 4 | 8445 | import re
from pandasqt.compat import QtCore, QtGui, Qt, Slot, Signal
from pandasqt.models.SupportedDtypes import SupportedDtypes
import numpy
from pandas import Timestamp
from pandas.tslib import NaTType
class DefaultValueValidator(QtGui.QValidator):
def __init__(self, parent=None):
super(DefaultValueValidator, self).__init__(parent)
self.dtype = None
self.intPattern = re.compile('[-+]?\d+')
self.uintPattern = re.compile('\d+')
self.floatPattern = re.compile('[+-]? *(?:\d+(?:\.\d*)?|\.\d+)')
self.boolPattern = re.compile('(1|t|0|f){1}$')
@Slot(numpy.dtype)
def validateType(self, dtype):
self.dtype = dtype
def fixup(self, string):
pass
def validate(self, s, pos):
if not s:
            # s is empty
return (QtGui.QValidator.Acceptable, s, pos)
if self.dtype in SupportedDtypes.strTypes():
return (QtGui.QValidator.Acceptable, s, pos)
elif self.dtype in SupportedDtypes.boolTypes():
match = re.match(self.boolPattern, s)
if match:
return (QtGui.QValidator.Acceptable, s, pos)
else:
return (QtGui.QValidator.Invalid, s, pos)
elif self.dtype in SupportedDtypes.datetimeTypes():
try:
ts = Timestamp(s)
except ValueError, e:
return (QtGui.QValidator.Intermediate, s, pos)
return (QtGui.QValidator.Acceptable, s, pos)
else:
dtypeInfo = None
if self.dtype in SupportedDtypes.intTypes():
match = re.search(self.intPattern, s)
if match:
try:
value = int(match.string)
except ValueError, e:
return (QtGui.QValidator.Invalid, s, pos)
dtypeInfo = numpy.iinfo(self.dtype)
elif self.dtype in SupportedDtypes.uintTypes():
match = re.search(self.uintPattern, s)
if match:
try:
value = int(match.string)
except ValueError, e:
return (QtGui.QValidator.Invalid, s, pos)
dtypeInfo = numpy.iinfo(self.dtype)
elif self.dtype in SupportedDtypes.floatTypes():
match = re.search(self.floatPattern, s)
if match:
try:
value = float(match.string)
except ValueError, e:
return (QtGui.QValidator.Invalid, s, pos)
dtypeInfo = numpy.finfo(self.dtype)
if dtypeInfo is not None:
if value >= dtypeInfo.min and value <= dtypeInfo.max:
return (QtGui.QValidator.Acceptable, s, pos)
else:
return (QtGui.QValidator.Invalid, s, pos)
else:
return (QtGui.QValidator.Invalid, s, pos)
return (QtGui.QValidator.Invalid, s, pos)
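# A small sketch of the validation patterns above using only the `re` module
# (illustrative only, independent of Qt): the bool pattern accepts the single
# characters 1/t/0/f, and the float pattern finds an optionally signed decimal
# number inside the entered string.
def _example_validator_patterns():
    boolPattern = re.compile('(1|t|0|f){1}$')
    floatPattern = re.compile('[+-]? *(?:\d+(?:\.\d*)?|\.\d+)')
    assert boolPattern.match('t') is not None
    assert boolPattern.match('yes') is None
    assert floatPattern.search('-3.14') is not None
    return boolPattern, floatPattern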
class AddAttributesDialog(QtGui.QDialog):
accepted = Signal(str, object, object)
def __init__(self, parent=None):
super(AddAttributesDialog, self).__init__(parent)
self.initUi()
def initUi(self):
self.setModal(True)
self.resize(303, 168)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
self.setSizePolicy(sizePolicy)
self.verticalLayout = QtGui.QVBoxLayout(self)
self.dialogHeading = QtGui.QLabel(self.tr('Add a new attribute column'), self)
self.gridLayout = QtGui.QGridLayout()
self.columnNameLineEdit = QtGui.QLineEdit(self)
self.columnNameLabel = QtGui.QLabel(self.tr('Name'), self)
self.dataTypeComboBox = QtGui.QComboBox(self)
self.dataTypeComboBox.addItems(SupportedDtypes.names())
self.columnTypeLabel = QtGui.QLabel(self.tr('Type'), self)
self.defaultValueLineEdit = QtGui.QLineEdit(self)
self.lineEditValidator = DefaultValueValidator(self)
self.defaultValueLineEdit.setValidator(self.lineEditValidator)
        self.defaultValueLabel = QtGui.QLabel(self.tr('Initial Value(s)'), self)
self.gridLayout.addWidget(self.columnNameLabel, 0, 0, 1, 1)
self.gridLayout.addWidget(self.columnNameLineEdit, 0, 1, 1, 1)
self.gridLayout.addWidget(self.columnTypeLabel, 1, 0, 1, 1)
self.gridLayout.addWidget(self.dataTypeComboBox, 1, 1, 1, 1)
self.gridLayout.addWidget(self.defaultValueLabel, 2, 0, 1, 1)
self.gridLayout.addWidget(self.defaultValueLineEdit, 2, 1, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Ok)
self.verticalLayout.addWidget(self.dialogHeading)
self.verticalLayout.addLayout(self.gridLayout)
self.verticalLayout.addWidget(self.buttonBox)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.dataTypeComboBox.currentIndexChanged.connect(self.updateValidatorDtype)
self.updateValidatorDtype(self.dataTypeComboBox.currentIndex())
def accept(self):
super(AddAttributesDialog, self).accept()
newColumn = self.columnNameLineEdit.text()
dtype = SupportedDtypes.dtype(self.dataTypeComboBox.currentText())
defaultValue = self.defaultValueLineEdit.text()
try:
if dtype in SupportedDtypes.intTypes() + SupportedDtypes.uintTypes():
defaultValue = int(defaultValue)
elif dtype in SupportedDtypes.floatTypes():
defaultValue = float(defaultValue)
elif dtype in SupportedDtypes.boolTypes():
defaultValue = defaultValue.lower() in ['t', '1']
elif dtype in SupportedDtypes.datetimeTypes():
defaultValue = Timestamp(defaultValue)
if isinstance(defaultValue, NaTType):
defaultValue = Timestamp('')
else:
defaultValue = dtype.type()
except ValueError, e:
defaultValue = dtype.type()
self.accepted.emit(newColumn, dtype, defaultValue)
@Slot(int)
def updateValidatorDtype(self, index):
(dtype, name) = SupportedDtypes.tupleAt(index)
self.defaultValueLineEdit.clear()
self.lineEditValidator.validateType(dtype)
class RemoveAttributesDialog(QtGui.QDialog):
accepted = Signal(list)
def __init__(self, columns, parent=None):
super(RemoveAttributesDialog, self).__init__(parent)
self.columns = columns
self.initUi()
def initUi(self):
self.setWindowTitle(self.tr('Remove Attributes'))
self.setModal(True)
self.resize(366, 274)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Expanding)
self.setSizePolicy(sizePolicy)
self.gridLayout = QtGui.QGridLayout(self)
self.dialogHeading = QtGui.QLabel(self.tr('Select the attribute column(s) which shall be removed'), self)
self.listView = QtGui.QListView(self)
model = QtGui.QStandardItemModel()
for column in self.columns:
item = QtGui.QStandardItem(column)
model.appendRow(item)
self.listView.setModel(model)
self.listView.setSelectionMode(QtGui.QListView.MultiSelection)
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Ok)
self.gridLayout.addWidget(self.dialogHeading, 0, 0, 1, 1)
self.gridLayout.addWidget(self.listView, 1, 0, 1, 1)
self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 1)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
def accept(self):
selection = self.listView.selectedIndexes()
names = []
for index in selection:
position = index.row()
names.append((position, index.data(QtCore.Qt.DisplayRole)))
super(RemoveAttributesDialog, self).accept()
self.accepted.emit(names) | mit |
adamtiger/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 37 | 3774 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers."""
# Create three fully connected layers respectively of size 10, 20, and 10.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Convert the labels to a one-hot tensor of shape (length of features, 3) and
  # with an on-value of 1 for each one-hot vector of length 3.
onehot_labels = tf.one_hot(labels, 3, 1, 0)
# Compute loss.
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
# Create training op with exponentially decaying learning rate.
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
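# A small sketch of the schedule produced by tf.train.exponential_decay above
# with its defaults (staircase=False):
#   decayed = learning_rate * decay_rate ** (global_step / decay_steps)
# The constants match the values used in my_model; illustrative only.
def _example_decay_schedule(step):
  """Return the decayed learning rate at `step` for the values used above."""
  return 0.1 * 0.001 ** (step / 100.0)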
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/matplotlib/backends/backend_gtk.py | 11 | 37356 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os, sys, warnings
def fn_name(): return sys._getframe(1).f_code.co_name
if six.PY3:
warnings.warn(
"The gtk* backends have not been tested with Python 3.x",
ImportWarning)
try:
import gobject
import gtk; gdk = gtk.gdk
import pango
except ImportError:
raise ImportError("Gtk* backend requires pygtk to be installed.")
pygtk_version_required = (2,4,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
_new_tooltip_api = (gtk.pygtk_version[1] >= 12)
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib.backends.backend_gdk import RendererGDK, FigureCanvasGDK
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import markers
from matplotlib import cbook
from matplotlib import verbose
from matplotlib import rcParams
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
# Hide the benign warning that it can't stat a file that doesn't exist
warnings.filterwarnings('ignore', '.*Unable to retrieve the file info for.*', gtk.Warning)
cursord = {
cursors.MOVE : gdk.Cursor(gdk.FLEUR),
cursors.HAND : gdk.Cursor(gdk.HAND2),
cursors.POINTER : gdk.Cursor(gdk.LEFT_PTR),
cursors.SELECT_REGION : gdk.Cursor(gdk.TCROSS),
}
# ref gtk+/gtk/gtkwidget.h
def GTK_WIDGET_DRAWABLE(w):
flags = w.flags();
return flags & gtk.VISIBLE != 0 and flags & gtk.MAPPED != 0
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
if gtk.main_level() == 0:
gtk.main()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTK(figure)
manager = FigureManagerGTK(canvas, num)
return manager
class TimerGTK(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses GTK for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = gobject.timeout_add(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
gobject.source_remove(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self):
TimerBase._on_timer(self)
# Gtk timeout_add() requires that the callback returns True if it
# is to be called again.
if len(self.callbacks) > 0 and not self._single:
return True
else:
self._timer = None
return False
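# A minimal usage sketch (illustrative only): a TimerGTK is normally obtained
# through FigureCanvasGTK.new_timer() (defined below) rather than constructed
# directly.  Callbacks registered with add_callback() fire every `interval`
# milliseconds once the GTK main loop is running.
def _example_timer_usage(canvas):
    def _report():
        print('timer fired')
    timer = canvas.new_timer(interval=500)
    timer.add_callback(_report)
    timer.start()
    return timer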
class FigureCanvasGTK (gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
65511 : 'super',
65512 : 'super',
65406 : 'alt',
65289 : 'tab',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.EXPOSURE_MASK |
gdk.KEY_PRESS_MASK |
gdk.KEY_RELEASE_MASK |
gdk.ENTER_NOTIFY_MASK |
gdk.LEAVE_NOTIFY_MASK |
gdk.POINTER_MOTION_MASK |
gdk.POINTER_MOTION_HINT_MASK)
def __init__(self, figure):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
FigureCanvasBase.__init__(self, figure)
gtk.DrawingArea.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._pixmap_width = -1
self._pixmap_height = -1
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('expose_event', self.expose_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(False)
self.set_flags(gtk.CAN_FOCUS)
self._renderer_init()
self._idle_event_id = gobject.idle_add(self.idle_event)
self.last_downclick = {}
def destroy(self):
#gtk.DrawingArea.destroy(self)
self.close_event()
gobject.source_remove(self._idle_event_id)
if self._idle_draw_id != 0:
gobject.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
if event.direction==gdk.SCROLL_UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
dblclick = (event.type == gdk._2BUTTON_PRESS)
if not dblclick:
# GTK is the only backend that generates a DOWN-UP-DOWN-DBLCLICK-UP event
# sequence for a double click. All other backends have a DOWN-UP-DBLCLICK-UP
# sequence. In order to provide consistency to matplotlib users, we will
# eat the extra DOWN event in the case that we detect it is part of a double
# click.
# first, get the double click time in milliseconds.
current_time = event.get_time()
last_time = self.last_downclick.get(event.button,0)
dblclick_time = gtk.settings_get_for_screen(gdk.screen_get_default()).get_property('gtk-double-click-time')
delta_time = current_time-last_time
if delta_time < dblclick_time:
del self.last_downclick[event.button] # we do not want to eat more than one event.
return False # eat.
self.last_downclick[event.button] = current_time
FigureCanvasBase.button_press_event(self, x, y, event.button, dblclick=dblclick, guiEvent=event)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
key = self._get_key(event)
if _debug: print("hit", key)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
key = self._get_key(event)
if _debug: print("release", key)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
# flipy so y=0 is bottom of canvas
y = self.allocation.height - y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
x, y, state = event.window.get_pointer()
FigureCanvasBase.enter_notify_event(self, event, xy=(x, y))
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval < 256:
key = chr(event.keyval)
else:
key = None
for key_mask, prefix in (
[gdk.MOD4_MASK, 'super'],
[gdk.MOD1_MASK, 'alt'],
[gdk.CONTROL_MASK, 'ctrl'], ):
if event.state & key_mask:
key = '{0}+{1}'.format(prefix, key)
return key
def configure_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if widget.window is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
def draw(self):
# Note: FigureCanvasBase.draw() is inconveniently named as it clashes
# with the deprecated gtk.Widget.draw()
self._need_redraw = True
if GTK_WIDGET_DRAWABLE(self):
self.queue_draw()
# do a synchronous draw (its less efficient than an async draw,
# but is required if/when animation is used)
self.window.process_updates (False)
def draw_idle(self):
def idle_draw(*args):
try:
self.draw()
finally:
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
def _renderer_init(self):
"""Override by GTK backends to select a different renderer
Renderer should provide the methods:
set_pixmap ()
set_width_height ()
that are used by
_render_figure() / _pixmap_prepare()
"""
self._renderer = RendererGDK (self, self.figure.dpi)
def _pixmap_prepare(self, width, height):
"""
Make sure _._pixmap is at least width, height,
create new pixmap if necessary
"""
if _debug: print('FigureCanvasGTK.%s' % fn_name())
create_pixmap = False
if width > self._pixmap_width:
# increase the pixmap in 10%+ (rather than 1 pixel) steps
self._pixmap_width = max (int (self._pixmap_width * 1.1),
width)
create_pixmap = True
if height > self._pixmap_height:
self._pixmap_height = max (int (self._pixmap_height * 1.1),
height)
create_pixmap = True
if create_pixmap:
self._pixmap = gdk.Pixmap (self.window, self._pixmap_width,
self._pixmap_height)
self._renderer.set_pixmap (self._pixmap)
def _render_figure(self, pixmap, width, height):
"""used by GTK and GTKcairo. GTKAgg overrides
"""
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def expose_event(self, widget, event):
"""Expose_event for all GTK backends. Should not be overridden.
"""
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if GTK_WIDGET_DRAWABLE(self):
if self._need_redraw:
x, y, w, h = self.allocation
self._pixmap_prepare (w, h)
self._render_figure(self._pixmap, w, h)
self._need_redraw = False
x, y, w, h = event.area
self.window.draw_drawable (self.style.fg_gc[self.state],
self._pixmap, x, y, x, y, w, h)
return False # finish event propagation?
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
filetypes['png'] = 'Portable Network Graphics'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format, *args, **kwargs):
if self.flags() & gtk.REALIZED == 0:
# for self.window(for pixmap) and has a side effect of altering
# figure width,height (via configure-event?)
gtk.DrawingArea.realize(self)
width, height = self.get_width_height()
pixmap = gdk.Pixmap (self.window, width, height)
self._renderer.set_pixmap (pixmap)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, 0, 8, width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
# set the default quality, if we are writing a JPEG.
# http://www.pygtk.org/docs/pygtk/class-gdkpixbuf.html#method-gdkpixbuf--save
options = cbook.restrict_dict(kwargs, ['quality'])
if format in ['jpg','jpeg']:
if 'quality' not in options:
options['quality'] = rcParams['savefig.jpeg_quality']
options['quality'] = str(options['quality'])
if is_string_like(filename):
try:
pixbuf.save(filename, format, options=options)
except gobject.GError as exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
elif is_writable_file_like(filename):
if hasattr(pixbuf, 'save_to_callback'):
def save_callback(buf, data=None):
data.write(buf)
try:
pixbuf.save_to_callback(save_callback, format, user_data=filename, options=options)
except gobject.GError as exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
else:
raise ValueError("Saving to a Python file-like object is only supported by PyGTK >= 2.8")
else:
raise ValueError("filename must be a path or a file-like object")
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerGTK(*args, **kwargs)
def flush_events(self):
gtk.gdk.threads_enter()
while gtk.events_pending():
gtk.main_iteration(True)
gtk.gdk.flush()
gtk.gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The gtk.Toolbar (gtk only)
vbox : The gtk.VBox containing the canvas and toolbar (gtk only)
window : The gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print('FigureManagerGTK.%s' % fn_name())
FigureManagerBase.__init__(self, canvas, num)
self.window = gtk.Window()
self.set_window_title("Figure %d" % num)
if (window_icon):
try:
self.window.set_icon_from_file(window_icon)
except:
# some versions of gtk throw a glib.GError but not
# all, so I am not sure how to catch it. I am unhappy
                # doing a blanket catch here, but am not sure what a
# better way is - JDH
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
self.vbox = gtk.VBox()
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
self.vbox.pack_start(self.canvas, True, True)
self.toolbar = self._get_toolbar(canvas)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
if self.toolbar is not None:
self.toolbar.show()
self.vbox.pack_end(self.toolbar, False, False)
tb_w, tb_h = self.toolbar.size_request()
h += tb_h
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print('FigureManagerGTK.%s' % fn_name())
if hasattr(self, 'toolbar') and self.toolbar is not None:
self.toolbar.destroy()
if hasattr(self, 'vbox'):
self.vbox.destroy()
if hasattr(self, 'window'):
self.window.destroy()
if hasattr(self, 'canvas'):
self.canvas.destroy()
self.__dict__.clear() #Is this needed? Other backends don't have it.
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
gtk.main_level() >= 1:
gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle(self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK (canvas, self.window)
else:
toolbar = None
return toolbar
def get_window_title(self):
return self.window.get_title()
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK(NavigationToolbar2, gtk.Toolbar):
def __init__(self, canvas, window):
self.win = window
gtk.Toolbar.__init__(self)
NavigationToolbar2.__init__(self, canvas)
def set_message(self, s):
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.window.set_cursor(cursord[cursor])
def release(self, event):
try: del self._pixmapBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
drawable = self.canvas.window
if drawable is None:
return
gc = drawable.new_gc()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0,x1), min(y0, y1), w, h)]
try:
lastrect, pixmapBack = self._pixmapBack
except AttributeError:
#snap image back
if event.inaxes is None:
return
ax = event.inaxes
l,b,w,h = [int(val) for val in ax.bbox.bounds]
b = int(height)-(b+h)
axrect = l,b,w,h
self._pixmapBack = axrect, gtk.gdk.Pixmap(drawable, w, h)
self._pixmapBack[1].draw_drawable(gc, drawable, l, b, 0, 0, w, h)
else:
drawable.draw_drawable(gc, pixmapBack, 0, 0, *lastrect)
drawable.draw_rectangle(gc, False, *rect)
def _init_toolbar(self):
self.set_style(gtk.TOOLBAR_ICONS)
self._init_toolbar2_4()
def _init_toolbar2_4(self):
basedir = os.path.join(rcParams['datapath'],'images')
if not _new_tooltip_api:
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file + '.png')
image = gtk.Image()
image.set_from_file(fname)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
if _new_tooltip_api:
tbutton.set_tooltip_text(tooltip_text)
else:
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
toolitem = gtk.SeparatorToolItem()
self.insert(toolitem, -1)
# set_draw() not making separator invisible,
# bug #143692 fixed Jun 06 2004, will be in GTK+ 2.6
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = gtk.ToolItem()
self.insert(toolitem, -1)
self.message = gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.win,
path=os.path.expanduser(rcParams.get('savefig.directory', '')),
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
fc.set_current_name(self.canvas.get_default_filename())
return fc
def save_figure(self, *args):
chooser = self.get_filechooser()
fname, format = chooser.get_filename_from_user()
chooser.destroy()
if fname:
startpath = os.path.expanduser(rcParams.get('savefig.directory', ''))
if startpath == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = startpath
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))
try:
self.canvas.print_figure(fname, format=format)
except Exception as e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = gtk.Window()
if (window_icon):
try: window.set_icon_from_file(window_icon)
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = gtk.VBox()
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True)
window.show()
def _get_canvas(self, fig):
return FigureCanvasGTK(fig)
class FileChooserDialog(gtk.FileChooserDialog):
"""GTK+ 2.4 file selector which presents the user with a menu
of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK),
path = None,
filetypes = [],
default_filetype = None
):
super(FileChooserDialog, self).__init__ (title, parent, action,
buttons)
super(FileChooserDialog, self).set_do_overwrite_confirmation(True)
self.set_default_response (gtk.RESPONSE_OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = gtk.HBox (spacing=10)
hbox.pack_start (gtk.Label ("File Format:"), expand=False)
liststore = gtk.ListStore(gobject.TYPE_STRING)
cbox = gtk.ComboBox(liststore)
cell = gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start (cbox)
self.filetypes = filetypes
self.sorted_filetypes = list(six.iteritems(filetypes))
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
cbox.append_text ("%s (*.%s)" % (name, ext))
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(gtk.RESPONSE_OK):
break
filename = self.get_filename()
break
return filename, self.ext
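# Illustrative sketch (not part of the original backend) of how a caller could
# drive the FileChooserDialog defined above, mirroring what
# NavigationToolbar2GTK.save_figure does. The helper name and its arguments
# (a FigureCanvasGTK instance and a parent gtk.Window) are assumptions, and a
# working pygtk runtime is required for this to run.
def _example_save_dialog(canvas, parent_window):
    fc = FileChooserDialog(
        title='Save the figure',
        parent=parent_window,
        filetypes=canvas.get_supported_filetypes(),
        default_filetype=canvas.get_default_filetype())
    fname, fmt = fc.get_filename_from_user()
    fc.destroy()
    if fname:
        # delegate the actual rendering to the canvas, as save_figure does
        canvas.print_figure(fname, format=fmt)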
class DialogLineprops:
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in markers.MarkerStyle.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import gtk.glade
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
self._updateson = True # suppress updates when setting widgets manually
self.wtree = gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
        'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
        'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
r,g,b = colorConverter.to_rgb(line.get_color())
color = gtk.gdk.Color(*[int(val*65535) for val in (r,g,b)])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
color = gtk.gdk.Color(*[int(val*65535) for val in (r,g,b)])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
'called colorbutton marker clicked'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# set icon used when windows are minimized
# Unfortunately, the SVG renderer (rsvg) leaks memory under earlier
# versions of pygtk, so we have to use a PNG file instead.
try:
if gtk.pygtk_version < (2, 8, 0) or sys.platform == 'win32':
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
window_icon = os.path.join(rcParams['datapath'], 'images', icon_filename)
except:
window_icon = None
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel gtk.Window
parent = parent.get_toplevel()
if parent.flags() & gtk.TOPLEVEL == 0:
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = gtk.MessageDialog(
parent = parent,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
dialog.run()
dialog.destroy()
FigureCanvas = FigureCanvasGTK
FigureManager = FigureManagerGTK
| gpl-2.0 |
krez13/scikit-learn | sklearn/gaussian_process/kernels.py | 18 | 66251 | """Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
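# Minimal sketch of the kernel algebra described in the module docstring:
# scalars are promoted to ConstantKernel, "+" builds Sum and "*" builds
# Product. The helper below is illustrative only (not part of scikit-learn);
# it refers to RBF, WhiteKernel, Sum and Product, which are defined later in
# this module, so it must only be called after import.
def _example_kernel_algebra():
    # 1.0 * RBF(...) becomes Product(ConstantKernel(1.0), RBF(...)); adding a
    # WhiteKernel then wraps the whole expression in a Sum.
    kernel = 1.0 * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.1)
    assert isinstance(kernel, Sum) and isinstance(kernel.k1, Product)
    # theta/bounds expose the free hyperparameters of the whole expression
    return kernel.theta, kernel.bounds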
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
Attributes
----------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
        The lower and upper bound on the parameter. If n_elements>1, a pair
        of 1d arrays with n_elements each may be given instead. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
        changed during hyperparameter tuning. If None is passed, "fixed" is
        derived from the given bounds.
"""
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct and gets rid of the per-instance __dict__; in particular it
    # does not copy the string for the keys on each instance.
    # Deriving a namedtuple subclass just to introduce an __init__ method
    # would reintroduce the __dict__ on the instance. We avoid this by telling
    # the Python interpreter that this subclass uses static __slots__ instead
    # of dynamic attributes. Furthermore we don't need any additional slot in
    # the subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = (bounds == "fixed")
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
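# Illustrative sketch (not part of the original module) of how the
# Hyperparameter namedtuple above normalizes its inputs. The helper name is
# an assumption used only for demonstration.
def _example_hyperparameter_specs():
    # scalar hyperparameter: the bounds pair is stored as a (1, 2) array
    h_scalar = Hyperparameter("length_scale", "numeric", (1e-5, 1e5))
    # vector-valued hyperparameter: a single bounds pair is repeated
    # n_elements times, yielding a (3, 2) array
    h_vector = Hyperparameter("length_scale", "numeric", (1e-5, 1e5),
                              n_elements=3)
    # the string "fixed" excludes the hyperparameter from optimization
    h_fixed = Hyperparameter("noise_level", "numeric", "fixed")
    assert h_fixed.fixed
    return h_scalar.bounds.shape, h_vector.bounds.shape  # (1, 2) and (3, 2)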
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels."""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr, value in sorted(self.__dict__.items()):
if attr.startswith("hyperparameter_"):
r.append(value)
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(getattr(self, hyperparameter.name))
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
setattr(self, hyperparameter.name,
np.exp(theta[i:i + hyperparameter.n_elements]))
i += hyperparameter.n_elements
else:
setattr(self, hyperparameter.name, np.exp(theta[i]))
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1."""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y)."""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels."""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
        # every kernel in the compound is assumed to expose the same n_dims
        k_dims = self.kernels[0].n_dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
        Note that this compound kernel returns the results of all simple
        kernels stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators. """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
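# Illustrative sketch (not part of the original module): Kernel.__pow__
# wraps a kernel in Exponentiation, so k ** 2 squares every kernel entry.
# RBF is defined later in this module.
def _example_exponentiation():
    X = np.array([[0.0], [1.5]])
    base = RBF(length_scale=1.0)
    k_exp = base ** 2  # Exponentiation(base, exponent=2)
    assert np.allclose(k_exp(X), base(X) ** 2)
    assert np.allclose(k_exp.diag(X), base.diag(X) ** 2)
    return k_exp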
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
self.hyperparameter_constant_value = \
Hyperparameter("constant_value", "numeric", constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
self.hyperparameter_noise_level = \
Hyperparameter("noise_level", "numeric", noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
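# Illustrative sketch (not part of the original module) of the WhiteKernel
# semantics implemented above: a scaled identity on identical inputs and
# zeros for cross-evaluations against a second argument.
def _example_white_kernel_noise():
    X = np.random.RandomState(0).rand(4, 2)
    noise = WhiteKernel(noise_level=0.3)
    assert np.allclose(noise(X), 0.3 * np.eye(4))
    assert np.allclose(noise(X, X + 1.0), np.zeros((4, 4)))
    return noise.diag(X)  # 0.3 for every sample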
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each dimension
        of length_scale defines the length-scale of the respective feature
        dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
if np.iterable(length_scale):
if len(length_scale) > 1:
self.anisotropic = True
self.length_scale = np.asarray(length_scale, dtype=np.float)
else:
self.anisotropic = False
self.length_scale = float(length_scale[0])
else:
self.anisotropic = False
self.length_scale = float(length_scale)
self.length_scale_bounds = length_scale_bounds
if self.anisotropic: # anisotropic length_scale
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds,
len(length_scale))
else:
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or self.length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (self.length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
raise Exception("Anisotropic kernels require that the number "
"of length scales and features match.")
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, self.length_scale)
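# Illustrative sketch (not part of the original module) of the RBF kernel
# above, in its isotropic and anisotropic variants, including the analytic
# gradient with respect to the (log) length-scale.
def _example_rbf_kernel():
    X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0]])
    iso = RBF(length_scale=1.0)            # one length-scale for all features
    aniso = RBF(length_scale=[1.0, 2.0])   # one length-scale per feature
    K, K_gradient = iso(X, eval_gradient=True)
    # K[i, j] = exp(-0.5 * ||x_i - x_j||^2 / length_scale**2)
    assert np.isclose(K[0, 1], np.exp(-0.5))
    return K_gradient.shape, aniso(X).shape  # (3, 3, 1) and (3, 3)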
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
See Rasmussen and Williams 2006, pp84 for details regarding the
different variants of the Matern kernel.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each dimension
        of length_scale defines the length-scale of the respective feature
        dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
nu: float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
        (appr. 10 times higher) since they require evaluating the modified
        Bessel function. Furthermore, in contrast to length_scale, nu is kept
        fixed to its initial value and not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (self.length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else: # isotropic
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.nu)
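# Illustrative sketch (not part of the original module): for nu=0.5 the
# Matern kernel above reduces to the absolute-exponential kernel exp(-d),
# while larger nu values give progressively smoother kernels.
def _example_matern_smoothness():
    X = np.linspace(0.0, 1.0, 4).reshape(-1, 1)
    k_rough = Matern(length_scale=1.0, nu=0.5)
    k_smooth = Matern(length_scale=1.0, nu=2.5)
    assert np.allclose(k_rough(X), np.exp(-cdist(X, X, metric='euclidean')))
    return k_smooth(X)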
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_alpha = \
Hyperparameter("alpha", "numeric", alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
    parameter periodicity>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
    k(x_i, x_j) = exp(-2 * (sin(\pi / periodicity * d(x_i, x_j)) / length_scale) ** 2)
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_periodicity = \
Hyperparameter("periodicity", "numeric", periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
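# Illustrative sketch (not part of the original module): with the default
# periodicity of 1.0, inputs separated by whole periods are (numerically)
# perfectly correlated under the ExpSineSquared kernel above.
def _example_periodic_kernel():
    X = np.array([[0.0], [1.0], [2.0]])
    k = ExpSineSquared(length_scale=1.0, periodicity=1.0)
    K = k(X)
    assert np.allclose(K, np.ones((3, 3)))
    return K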
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
    regression by putting N(0, 1) priors on the coefficients of x_d
    (d = 1, ..., D) and a prior of N(0, \sigma_0^2) on the bias. The DotProduct
    kernel is invariant to a rotation of the coordinates about the origin, but
    not to translations. It is parameterized by a parameter sigma_0^2. For
    sigma_0^2 = 0, the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
Parameters
----------
sigma_0 : float >= 0, default: 1.0
        Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
        the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
        The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
self.hyperparameter_sigma_0 = \
Hyperparameter("sigma_0", "numeric", sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
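# Illustrative sketch (not part of the original module) of the DotProduct
# kernel above: k(x_i, x_j) = sigma_0**2 + x_i . x_j, which is non-stationary
# because it depends on the points themselves, not on x_i - x_j.
def _example_dot_product_kernel():
    X = np.array([[1.0, 0.0], [0.0, 2.0]])
    k = DotProduct(sigma_0=1.0)
    assert np.allclose(k(X), np.inner(X, X) + 1.0)
    assert not k.is_stationary()
    return k.diag(X)  # [1 + 1, 1 + 4] = [2, 5]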
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
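# Editorial usage sketch (not upstream code): _approx_fprime above expects a
# function mapping a parameter vector to a 2-d array and returns the
# finite-difference gradient stacked along a third axis. The helper name
# _approx_fprime_usage_sketch is hypothetical and is never called here.
def _approx_fprime_usage_sketch():
    def f(theta):
        # 2x2 output that scales linearly with the single parameter theta[0]
        return theta[0] * np.ones((2, 2))
    grad = _approx_fprime(np.array([3.0]), f, 1e-8)
    # grad has shape (2, 2, 1) and every entry is approximately 1.0
    return grad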
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
Parameters
----------
gamma: float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.hyperparameter_gamma = \
Hyperparameter("gamma", "numeric", gamma_bounds)
self.metric = metric
if pairwise_kernels_kwargs is not None:
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
else:
self.pairwise_kernels_kwargs = {}
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**self.pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **self.pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X)[:, 0]
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
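# Editorial usage sketch (not upstream code): minimal use of the PairwiseKernel
# wrapper defined above with the "rbf" metric. The helper name
# _pairwise_kernel_usage_sketch is hypothetical and is never called here.
def _pairwise_kernel_usage_sketch():
    X = np.array([[0.0], [1.0], [2.0]])
    kernel = PairwiseKernel(gamma=0.5, metric="rbf")
    K = kernel(X)  # exp(-gamma * squared Euclidean distance) for each pair
    # K is a symmetric (3, 3) matrix with ones on the diagonal
    return K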
| bsd-3-clause |
bobmyhill/burnman | burnman/output_seismo.py | 2 | 15783 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2017 by the BurnMan team, released under the GNU
# GPL v2 or later.
from __future__ import absolute_import
import numpy as np
import warnings
import scipy.integrate
import matplotlib.pyplot as plt
import pkgutil
from . import tools
from . import constants
from . import seismic
from . import geotherm
from .planet import Planet
from .layer import Layer
def write_tvel_file(planet_or_layer, filename='burnmanmodel.tvel', background_model=None):
"""
Function to write input file for obspy travel time calculations.
Note: Because density isn't defined for most 1D seismic models, densities are output as zeroes. The tvel format has a column for density, but this column is not used by obspy for travel time calculations.
Parameters
----------
planet_or_layer : burnman.Planet() or burnman.Layer()
Planet or layer to write out to tvel file
filename : string
        Filename to write to
background_model : burnman.seismic.Seismic1DModel()
        1D seismic model used to fill in the parts of the planet (likely to be an Earth model) that aren't defined by the layer (only needed when using Layer())
"""
if not isinstance(planet_or_layer, (Planet, Layer)):
raise TypeError("Input must be a Planet() or Layer() ")
if isinstance(planet_or_layer, Layer):
assert(background_model)
layer = planet_or_layer
depths = background_model.internal_depth_list()
above_layer = np.where(depths < (np.max(depths) - layer.outer_radius))[-1]
below_layer = np.where( depths > (np.max(depths) -layer.inner_radius))[0]
data_above = list(zip(depths[above_layer] / 1.e3,
background_model.v_p(depths[above_layer]) / 1.e3,
background_model.v_s(depths[above_layer]) / 1.e3,
np.zeros_like(depths[above_layer])))
data_layer = list(zip((np.max(depths) -layer.radii)[::- 1] /1.e3,
layer.v_p[::-1] /1.e3,
layer.v_s[::-1] /1.e3,
layer.density[::-1] /1.e3))
data_below = list(zip(depths[below_layer] / 1.e3,
background_model.v_p(depths[below_layer]) / 1.e3,
background_model.v_s(depths[below_layer]) / 1.e3,
np.zeros_like(depths[below_layer])))
data = data_above + data_layer + data_below
with open(filename, 'wb') as f:
np.savetxt(f, data, header = layer.name + ' model from BurnMan between a radius of ' +
str(layer.inner_radius) + ' and ' + str(layer.outer_radius) +' km \n'+
background_model.__class__.__name__ +
' for the rest of the earth', fmt='%5.2f', delimiter='\t')
if isinstance(planet_or_layer, Planet):
planet = planet_or_layer
data = list(zip((planet.radius_planet - planet.radii)[::-1] / 1.e3,
planet.v_p[::-1] / 1.e3,
planet.v_s[::-1] /1.e3,
planet.density[::- 1] / 1.e3))
with open(filename, 'wb') as f:
np.savetxt(f, data, header = planet.name + ' model from BurnMan with a radius of ' +
str(planet.radius_planet) + ' km \n Layers of planet are ' +
", ".join(layer.name for layer in planet.layers), fmt='%5.2f', delimiter='\t')
def write_axisem_input( rock, min_depth=670.e3, max_depth=2890.e3, T0=1900,
filename='axisem_burnmantestrock.txt', axisem_ref='axisem_prem_ani_noocean.txt', plotting=False):
"""
Writing velocities and densities to AXISEM (www.axisem.info) input file
Default is set to replacing the lower mantle with the BurnMan rock
Note:
- This implementation uses PREM to convert from depths to pressures to compute at
- This implementation assumes an adiabatic temperature profile, only T0 at min_depth can be set
- Currently, it only honors the discontinuities already in the synthetic input file, so it is best
to only replace certain layers with burnman values (this should be improved in the future).
Parameters
----------
rock : burnman.Composite()
Composition to implement in the model
min_depth : float
minimum depth to replace model (m) (default = 670 km)
max_depth : float
        maximum depth to replace model (m) (default = 2890 km)
T0 : float
Anchor temperature at min_depth for adiabatic profile (K) (default=1900)
filename: string
Output filename (default ='axisem_burnmantestrock.txt')
axisem_ref: string
Input filename (in burnman/data/input_seismic/) (default = 'axisem_prem_ani_noocean.txt')
plotting: Boolean
True means plot of the old model and replaced model will be shown (default = False)
"""
# Load reference input
datastream = pkgutil.get_data(
'burnman', 'data/input_seismic/' + axisem_ref)
lines = [line.strip()
for line in datastream.decode('ascii').split('\n') if line.strip()]
table = []
for line in lines[18:]:
numbers = np.fromstring(line, sep=' ')
if len(numbers) > 0:
if line[0] != "#" and line[0] != "%":
table.append(numbers)
table = np.array(table)
ref_radius = table[:, 0]
ref_depth = 6371.e3 - ref_radius
ref_density = table[:, 1]
ref_vpv = table[:, 2]
ref_vsv = table[:, 3]
ref_Qk = table[:, 4]
ref_Qmu = table[:, 5]
ref_vph = table[:, 6]
ref_vsh = table[:, 7]
ref_eta = table[:, 8]
# Cutting out range to input in Axisem reference file (currently the lower
# mantle)
indrange = [x for x in range(len(ref_depth)) if ref_depth[
x] > min_depth and ref_depth[x] < max_depth]
# pad both ends to include up to discontinuity, bit of a hack...
indrange.insert(0, indrange[0] - 1)
indrange.append(indrange[-1] + 1)
# Invert depthrange so adiabatic computations work!
depthrange = ref_depth[indrange]
# convert depths to pressures
pressures = seismic.PREM().pressure(depthrange)
# Computing adiabatic temperatures. T0 is an input parameter!
T0 = T0 # K
temperatures = geotherm.adiabatic(pressures, T0, rock)
print("Calculations are done for:")
rock.debug_print()
rock_vp, rock_vs, rock_rho = rock.evaluate(
['v_p', 'v_s', 'density'], pressures, temperatures)
discontinuity = 0
# WRITE OUT FILE
f = open(filename, 'w')
print('Writing ' + filename + ' ...')
f.write('# Input file ' +
filename +
' for AXISEM created using BurnMan, replacing ' +
axisem_ref +
' between ' +
str(np.round(min_depth /
1.e3)) +
' and ' +
str(np.round(max_depth /
1.e3)) +
' km \n')
f.write('NAME ' + filename + '\n')
for line in lines[2:18]:
f.write(line[:-1] + '\n')
for i in range(indrange[0]):
if i > 0 and ref_radius[i] == ref_radius[i - 1]:
discontinuity = discontinuity + 1
f.write('# Discontinuity ' +
str(discontinuity) +
', depth: ' +
str(np.round(ref_depth[i] /
1.e3, decimals=2)) +
' km \n')
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], ref_density[i], ref_vpv[i], ref_vsv[i], ref_Qk[i],
ref_Qmu[i], ref_vph[i], ref_vsh[i], ref_eta[i]))
for i in range(indrange[0], indrange[-1]):
ind2 = -1 + i - indrange[0]
if ref_radius[i] == ref_radius[i - 1]:
discontinuity = discontinuity + 1
f.write('# Discontinuity ' +
str(discontinuity) +
', depth: ' +
str(np.round(ref_depth[i] /
1.e3, decimals=2)) +
' km \n')
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i],
rock_rho[ind2],
rock_vp[ind2],
rock_vs[ind2],
ref_Qk[i],
ref_Qmu[i],
rock_vp[ind2],
rock_vs[ind2],
ref_eta[i]))
for i in range(indrange[-1], len(ref_radius)):
if ref_radius[i] == ref_radius[i - 1]:
discontinuity = discontinuity + 1
f.write('# Discontinuity ' +
str(discontinuity) +
', depth: ' +
str(np.round(ref_depth[i] /
1.e3, decimals=2)) +
' km \n')
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], ref_density[i], ref_vpv[i], ref_vsv[i], ref_Qk[i],
ref_Qmu[i], ref_vph[i], ref_vsh[i], ref_eta[i]))
f.close()
if plotting:
# plot vp
plt.plot(
ref_depth / 1.e3,
ref_vph / 1.e3,
color='g',
linestyle='-',
label='vp')
plt.plot(depthrange / 1.e3, rock_vp / 1.e3, color='g', linestyle='-',
marker='o', markerfacecolor='g', markersize=1)
# plot Vs
plt.plot(
ref_depth / 1.e3,
ref_vsh / 1.e3,
color='b',
linestyle='-',
label='vs')
plt.plot(depthrange / 1.e3, rock_vs / 1.e3, color='b', linestyle='-',
marker='o', markerfacecolor='b', markersize=1)
# plot density
plt.plot(
ref_depth / 1.e3,
ref_density / 1.e3,
color='r',
linestyle='-',
label='density')
plt.plot(depthrange / 1.e3, rock_rho / 1.e3, color='r', linestyle='-',
marker='o', markerfacecolor='r', markersize=1)
plt.title(filename + ' = ' + axisem_ref + ' replaced between ' +
str(min_depth / 1.e3) + ' and ' + str(max_depth / 1.e3) + ' km')
plt.legend(loc='lower right')
plt.show()
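# Editorial usage sketch (not upstream code): hedged example of replacing the
# lower mantle with a user-supplied composition via write_axisem_input above.
# The argument `rock` is assumed to be a burnman.Composite instance; the
# helper name is hypothetical and never called at import time.
def _write_axisem_input_usage_sketch(rock):
    write_axisem_input(rock, min_depth=670.e3, max_depth=2890.e3, T0=1900,
                       filename='axisem_burnmantestrock.txt', plotting=False)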
def write_mineos_input( rock, min_depth=670.e3, max_depth=2890.e3, T0=1900,
filename='mineos_burnmantestrock.txt', mineos_ref='mineos_prem_noocean.txt', plotting=False):
"""
Writing velocities and densities to Mineos (https://geodynamics.org/cig/software/mineos/) input file
Default is set to replacing the lower mantle with the BurnMan rock
Note:
- This implementation uses PREM to convert from depths to pressures to compute at
- This implementation assumes an adiabatic temperature profile, only T0 at min_depth can be set
- Currently, it only honors the discontinuities already in the synthetic input file, so it is best
to only replace certain layers with burnman values (this should be improved in the future).
Parameters
----------
rock : burnman.Composite()
Composition to implement in the model
min_depth : float
minimum depth to replace model (m) (default = 670 km)
max_depth : float
        maximum depth to replace model (m) (default = 2890 km)
T0 : float
Anchor temperature at min_depth for adiabatic profile (K) (default=1900)
filename: string
Output filename (default ='mineos_burnmantestrock.txt')
    mineos_ref: string
Input filename (in burnman/data/input_seismic/) (default = 'mineos_prem_noocean.txt')
plotting: Boolean
True means plot of the old model and replaced model will be shown (default = False)
"""
# Load reference input
datastream = pkgutil.get_data(
'burnman', 'data/input_seismic/' + mineos_ref)
lines = [line.strip()
for line in datastream.decode('ascii').split('\n') if line.strip()]
table = []
for line in lines[3:]:
numbers = np.fromstring(line, sep=' ')
table.append(numbers)
table = np.array(table)
ref_radius = table[:, 0]
ref_depth = 6371.e3 - ref_radius
ref_density = table[:, 1]
ref_vpv = table[:, 2]
ref_vsv = table[:, 3]
ref_Qk = table[:, 4]
ref_Qmu = table[:, 5]
ref_vph = table[:, 6]
ref_vsh = table[:, 7]
ref_eta = table[:, 8]
# Cutting out range to input in Mineos (currently the lower mantle)
indrange = [x for x in range(len(ref_depth)) if ref_depth[
x] > min_depth and ref_depth[x] < max_depth]
# pad both ends to include up to discontinuity, bit of a hack...
indrange.insert(0, indrange[0] - 1)
indrange.append(indrange[-1] + 1)
# Invert depthrange so adiabatic computations work!
depthrange = ref_depth[indrange][::-1]
# convert depths to pressures
pressures = seismic.PREM().pressure(depthrange)
# Computing adiabatic temperatures. T0 is a choice!
T0 = T0 # K
temperatures = geotherm.adiabatic(pressures, T0, rock)
print("Calculations are done for:")
rock.debug_print()
rock_vp, rock_vs, rock_rho = rock.evaluate(
['v_p', 'v_s', 'density'], pressures, temperatures)
# WRITE OUT FILE
f = open(filename, 'w')
print('Writing ' + filename + ' ...')
f.write(lines[0][:-2] + ' + ' + filename + '\n')
for line in lines[1:3]:
f.write(line[:-2] + '\n')
for i in range(indrange[0]):
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], ref_density[i], ref_vpv[i], ref_vsv[i], ref_Qk[i],
ref_Qmu[i], ref_vph[i], ref_vsh[i], ref_eta[i]))
for i in range(indrange[0], indrange[-1]):
ind2 = -1 - i + indrange[0]
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i],
rock_rho[ind2],
rock_vp[ind2],
rock_vs[ind2],
ref_Qk[i],
ref_Qmu[i],
rock_vp[ind2],
rock_vs[ind2],
ref_eta[i]))
for i in range(indrange[-1], len(ref_radius)):
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], ref_density[i], ref_vpv[i], ref_vsv[i], ref_Qk[i],
ref_Qmu[i], ref_vph[i], ref_vsh[i], ref_eta[i]))
f.close()
if plotting:
# plot vp
plt.plot(
ref_depth / 1.e3,
ref_vph / 1.e3,
color='g',
linestyle='-',
label='vp')
plt.plot(depthrange / 1.e3, rock_vp / 1.e3, color='g', linestyle='-',
marker='o', markerfacecolor='g', markersize=1)
# plot Vs
plt.plot(
ref_depth / 1.e3,
ref_vsh / 1.e3,
color='b',
linestyle='-',
label='vs')
plt.plot(depthrange / 1.e3, rock_vs / 1.e3, color='b', linestyle='-',
marker='o', markerfacecolor='b', markersize=1)
# plot density
plt.plot(
ref_depth / 1.e3,
ref_density / 1.e3,
color='r',
linestyle='-',
label='density')
plt.plot(depthrange / 1.e3, rock_rho / 1.e3, color='r', linestyle='-',
marker='o', markerfacecolor='r', markersize=1)
plt.title(filename + ' = ' + mineos_ref + ' replaced between ' +
str(min_depth / 1.e3) + ' and ' + str(max_depth / 1.e3) + ' km')
plt.legend(loc='lower right')
plt.show()
| gpl-2.0 |
andnovar/ggplot | ggplot/scales/scale_facet.py | 13 | 10175 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
# TODO: This is fairly repetiive and can definitely be
# condensed into a lot less code, but it's working for now
import numpy as np
import matplotlib.pyplot as plt
from .utils import calc_axis_breaks_and_limits
import sys
def scale_facet_wrap(rows, cols, positions, scaletype):
"""Set the scales on each subplot for wrapped faceting.
Parameters
----------
rows : int
number of rows in the faceted plot
cols : int
number of columns in the faceted plot
positions : list of int
zero-indexed list of faceted plot positions
scaletype : str or None
string indicating the type of scaling to apply to the rows and columns
- None : All plots get the same scale
- 'free_x' : each plot is free to determine its own x-scale, all plots have the same y-scale
- 'free_y' : each plot is free to determine its own y-scale, all plots have the same x-scale
- 'free' : plots are free to determine their own x- and y-scales
"""
x_extents, y_extents = {}, {}
# Calculate the extents for the plots
for pos in positions:
# Work on the subplot at the current position (adding 1 to pos because
# matplotlib 1-indexes their subplots)
plt.subplot(rows, cols, pos + 1)
# Update the x extents for each column
column, row = 0, 0
if scaletype in ["free", "free_x"]:
# If the x scale is free, all plots get their own x scale
column = pos % cols
row = int(pos / cols)
limits = plt.xlim()
# Get the current bounds for this column. Default lower limit is
# infinity (because all values < infinity) and the default upper limit
# is -infinity (because all values > -infinity).
lower, upper = x_extents.get((column, row), (float("inf"), float("-inf")))
lower = min(limits[0], lower)
upper = max(limits[1], upper)
x_extents[(column, row)] = (lower, upper)
column, row = 0, 0
if scaletype in ["free", "free_y"]:
# If the y scale is free, all plots get their own y scale
column = pos % cols
row = int(pos / cols)
limits = plt.ylim()
# Get the current bounds for this column. Default lower limit is
# infinity (because all values < infinity) and the default upper limit
# is -infinity (because all values > -infinity).
lower, upper = y_extents.get((column, row), (float("inf"), float("-inf")))
lower = min(limits[0], lower)
upper = max(limits[1], upper)
y_extents[(column, row)] = (lower, upper)
for pos in positions:
plt.subplot(rows, cols, pos + 1)
row = int(pos / cols)
column = pos % cols
# Find the extents for this position. Default to the extents at
# position column 0, row 0, in case all plots use the same scale
xmin, xmax = x_extents[(0, 0)]
ymin, ymax = y_extents[(0, 0)]
if scaletype in ["free", "free_x"]:
# If the x scale is free, look up the extents for this column and row
xmin, xmax = x_extents[(column, row)]
if scaletype in ["free", "free_y"]:
# If the y scale is free, look up the extents for this column and row
ymin, ymax = y_extents[(column, row)]
x_scale, x_min, x_max = calc_axis_breaks_and_limits(xmin, xmax, 4)
x_scale = np.round(x_scale, 2)
# Only apply x labels to plots if each plot has its own scale or the
# plot is in the bottom row of each column.
x_labs = []
if scaletype in ["free", "free_x"] or pos in positions[-cols:]:
x_labs = x_scale
plt.xticks(x_scale, x_labs)
plt.xlim(x_min, x_max )
# Set the y-axis scale and labels
y_scale, y_min, y_max = calc_axis_breaks_and_limits(ymin, ymax, 4)
y_scale = np.round(y_scale, 2)
# Only apply y labels to plots if each plot has its own scale or the
# plot is in the left column.
y_labs = []
if scaletype in ["free", "free_y"] or column == 0:
y_labs = y_scale
plt.yticks(y_scale, y_labs)
plt.ylim(y_min, y_max)
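# Editorial usage sketch (not upstream code): scale_facet_wrap above rescales
# the axes of an already-faceted matplotlib figure. A minimal invocation for a
# 2x2 wrapped facet with fully free scales might look like this; the helper
# name _scale_facet_wrap_usage_sketch is hypothetical and never called here.
def _scale_facet_wrap_usage_sketch():
    rows, cols = 2, 2
    for pos in range(rows * cols):
        plt.subplot(rows, cols, pos + 1)   # draw something in each facet
        plt.plot([0, 1, 2], [0, pos, 2 * pos])
    scale_facet_wrap(rows, cols, positions=list(range(rows * cols)),
                     scaletype="free")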
def scale_facet_grid(xdim, ydim, facet_pairs, scaletype):
# everyone gets the same scales
if scaletype is None:
min_x, max_x = 999999999, -999999999
min_y, max_y = 999999999, -999999999
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
min_x = min(min_x, min(plt.xlim()))
max_x = max(max_x, max(plt.xlim()))
min_y = min(min_y, min(plt.ylim()))
max_y = max(max_y, max(plt.ylim()))
y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_y, max_y, 4)
y_scale = np.round(y_scale, 2)
x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_x, max_x, 4)
x_scale = np.round(x_scale, 2)
# for all axis set the individual axis limits and ticks
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
y_labs = y_scale
if pos % ydim!=1:
y_labs = []
plt.yticks(y_scale, y_labs)
plt.ylim(y_min, y_max)
x_labs = x_scale
if pos <= (len(facet_pairs) - ydim):
x_labs = []
plt.xticks(x_scale, x_labs)
plt.xlim(x_min, x_max)
elif scaletype=="free_y":
min_x, max_x = 999999999, -999999999
min_ys, max_ys = {}, {}
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
y_bucket = int((pos-1) / ydim)
min_ys[y_bucket] = min_ys.get(y_bucket, 999999999)
max_ys[y_bucket] = max_ys.get(y_bucket, -999999999)
min_x = min(min_x, min(plt.xlim()))
max_x = max(max_x, max(plt.xlim()))
min_ys[y_bucket] = min(min_ys[y_bucket], min(plt.ylim()))
max_ys[y_bucket] = max(max_ys[y_bucket], max(plt.ylim()))
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
y_bucket = int((pos-1) / ydim)
y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_ys[y_bucket], max_ys[y_bucket],4)
y_scale = np.round(y_scale, 2)
y_labs = y_scale
if pos % ydim!=1:
y_labs = []
plt.yticks(y_scale, y_labs)
plt.ylim(y_min, y_max)
x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_x, max_x, 4)
x_scale = np.round(x_scale, 2)
x_labs = x_scale
if pos <= (len(facet_pairs) - ydim):
x_labs = []
plt.xticks(x_scale, x_labs)
plt.xlim(x_min, x_max)
elif scaletype=="free_x":
min_y, max_y = 999999999, -999999999
min_xs, max_xs = {}, {}
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
x_bucket = int((pos-1) / xdim)
min_xs[x_bucket] = min_xs.get(x_bucket, 999999999)
max_xs[x_bucket] = max_xs.get(x_bucket, -999999999)
min_y = min(min_y, min(plt.ylim()))
max_y = max(max_y, max(plt.ylim()))
min_xs[x_bucket] = min(min_xs[x_bucket], min(plt.xlim()))
max_xs[x_bucket] = max(max_xs[x_bucket], max(plt.xlim()))
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
x_bucket = int((pos-1) / xdim)
x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_xs[x_bucket], max_xs[x_bucket],4)
x_scale = np.round(x_scale, 2)
x_labs = x_scale
if pos <= ((len(facet_pairs) - ydim)):
x_labs = []
plt.xticks(x_scale, x_labs)
plt.xlim(x_min, x_max)
y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_y, max_y, 4)
y_scale = np.round(y_scale, 2)
y_labs = y_scale
if pos % ydim!=1:
y_labs = []
plt.yticks(y_scale, y_labs)
plt.ylim(y_min, y_max)
else:
min_xs, max_xs = {}, {}
min_ys, max_ys = {}, {}
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
x_bucket = int((pos-1) / xdim)
min_xs[x_bucket] = min_xs.get(x_bucket, 999999999)
max_xs[x_bucket] = max_xs.get(x_bucket, -999999999)
min_xs[x_bucket] = min(min_xs[x_bucket], min(plt.xlim()))
max_xs[x_bucket] = max(max_xs[x_bucket], max(plt.xlim()))
y_bucket = int((pos-1) / ydim)
min_ys[y_bucket] = min_ys.get(y_bucket, 999999999)
max_ys[y_bucket] = max_ys.get(y_bucket, -999999999)
min_ys[y_bucket] = min(min_ys[y_bucket], min(plt.ylim()))
max_ys[y_bucket] = max(max_ys[y_bucket], max(plt.ylim()))
for pos, _ in enumerate(facet_pairs):
pos += 1
plt.subplot(xdim, ydim, pos)
x_bucket = int((pos-1) / xdim)
x_scale, x_min, x_max = calc_axis_breaks_and_limits(min_xs[x_bucket], max_xs[x_bucket],4)
x_scale = np.round(x_scale, 2)
x_labs = x_scale
if pos <= ((len(facet_pairs) - ydim)):
x_labs = []
plt.xticks(x_scale, x_labs)
plt.xlim(x_min, x_max)
y_bucket = int((pos-1) / ydim)
y_scale, y_min, y_max = calc_axis_breaks_and_limits(min_ys[y_bucket], max_ys[y_bucket],4)
y_scale = np.round(y_scale, 2)
y_labs = y_scale
if pos % ydim!=1:
y_labs = []
plt.yticks(y_scale, y_labs)
plt.ylim(y_min, y_max)
| bsd-2-clause |
RachitKansal/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
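# Editorial note (not upstream code): for example,
# mldata_filename('datasets-UCI iris') returns 'datasets-uci-iris', matching
# the dataset filename convention used by the mock entries further below.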
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respects the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
Caronthir/FYS3150 | Project3/analysis/runner.py | 1 | 2355 | #!/bin/python3
# -*- coding: utf-8 -*-
import subprocess
import json
import numpy as np
import seaborn as sns
import shlex
sns.set(context='poster')
class Runner:
def __init__(self):
self.param_path = '../data/parameters.json'
self.exe_path = '../cpp/solve'
self.energy_path = '../data/energy.txt'
self.position_path = '../data/position.txt'
self.analysis_path = 'analyze.py'
self.original_params = self.load_parameters()
self.parameters = self.load_parameters()
def __enter__(self):
self.setup()
return self
def __exit__(self, type, value, traceback):
self.revert_parameters()
def __setitem__(self, *args):
return self.parameters.__setitem__(*args)
def __getitem__(self, *args):
return self.parameters.__getitem__(*args)
def get_planet(self, name):
for planet in self['planets']:
if name == planet['name']:
return planet
raise IndexError("{} does not exist".format(planet))
def run_simulation(self):
self.save_parameters(self.parameters)
process = subprocess.Popen(self.exe_path, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
out, err = process.communicate()
return out.decode('ascii'), err.decode('ascii')
@staticmethod
def extract_time(out: str):
        time = float(out.split()[-1][:-1])  # crude parsing of the timing value; a regex approach didn't work here
return time
def run_analysis(self, arguments:str = '') -> None:
args = "python {} {}".format(self.analysis_path, arguments)
args = shlex.split(args)
process = subprocess.run(args)
def get_energy(self):
return np.loadtxt(self.energy_path)
def get_position(self):
position = np.loadtxt(self.position_path)
N, M = position.shape
return position.reshape(N, 3, M//3, order='F')
def revert_parameters(self):
self.save_parameters(self.original_params)
def load_parameters(self):
with open(self.param_path, 'r') as param_file:
param_json = json.load(param_file)
return param_json
def save_parameters(self, param_json: str):
with open(self.param_path, 'w') as param_file:
json.dump(param_json, param_file, indent=4)
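# Editorial usage sketch (not upstream code): a hedged way to drive the Runner
# class above. It assumes the compiled solver at ../cpp/solve and the parameter
# and data files it reads and writes exist; the helper name is hypothetical and
# never called at import time.
def _runner_usage_sketch():
    runner = Runner()
    out, err = runner.run_simulation()   # runs ../cpp/solve, captures output
    energy = runner.get_energy()         # array loaded from ../data/energy.txt
    positions = runner.get_position()    # reshaped to (N, 3, n_bodies)
    runner.revert_parameters()
    return energy, positions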
| mit |
legacysurvey/pipeline | py/legacyanalysis/compare-two-catalogs.py | 2 | 18973 | from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
from glob import glob
import os
import re
from astrometry.util.fits import fits_table, merge_tables
from astrometry.libkd.spherematch import match_radec
from astrometry.util.plotutils import PlotSequence
from tractor.brightness import NanoMaggies
import scipy.stats
'''
This is a little script for comparing two directories full of tractor
catalogs.
'''
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--name1', help='Name for first data set')
parser.add_argument('--name2', help='Name for second data set')
parser.add_argument('--plot-prefix', default='compare',
                        help='Prefix for plot filenames; default "%(default)s"')
parser.add_argument('--match', default=1.0, type=float,
help='Astrometric cross-match distance in arcsec')
parser.add_argument('dir1', help='First directory to compare')
parser.add_argument('dir2', help='Second directory to compare')
opt = parser.parse_args()
ps = PlotSequence(opt.plot_prefix)
name1 = opt.name1
if name1 is None:
name1 = os.path.basename(opt.dir1)
if not len(name1):
name1 = os.path.basename(os.path.dirname(opt.dir1))
name2 = opt.name2
if name2 is None:
name2 = os.path.basename(opt.dir2)
if not len(name2):
name2 = os.path.basename(os.path.dirname(opt.dir2))
tt = 'Comparing %s to %s' % (name1, name2)
# regex for tractor-*.fits catalog filename
catre = re.compile('tractor-.*.fits')
cat1,cat2 = [],[]
for basedir,cat in [(opt.dir1, cat1), (opt.dir2, cat2)]:
for dirpath,dirnames,filenames in os.walk(basedir, followlinks=True):
for fn in filenames:
if not catre.match(fn):
print('Skipping', fn, 'due to filename')
continue
fn = os.path.join(dirpath, fn)
t = fits_table(fn)
print(len(t), 'from', fn)
cat.append(t)
cat1 = merge_tables(cat1, columns='fillzero')
cat2 = merge_tables(cat2, columns='fillzero')
print('Total of', len(cat1), 'from', name1)
print('Total of', len(cat2), 'from', name2)
cat1.cut(cat1.brick_primary)
cat2.cut(cat2.brick_primary)
print('Total of', len(cat1), 'BRICK_PRIMARY from', name1)
print('Total of', len(cat2), 'BRICK_PRIMARY from', name2)
# cat1.cut((cat1.decam_anymask[:,1] == 0) *
# (cat1.decam_anymask[:,2] == 0) *
# (cat1.decam_anymask[:,4] == 0))
# cat2.cut((cat2.decam_anymask[:,1] == 0) *
# (cat2.decam_anymask[:,2] == 0) *
# (cat2.decam_anymask[:,4] == 0))
# print('Total of', len(cat1), 'unmasked from', name1)
# print('Total of', len(cat2), 'unmasked from', name2)
I,J,d = match_radec(cat1.ra, cat1.dec, cat2.ra, cat2.dec, opt.match/3600.,
nearest=True)
print(len(I), 'matched')
plt.clf()
plt.hist(d * 3600., 100)
plt.xlabel('Match distance (arcsec)')
plt.title(tt)
ps.savefig()
matched1 = cat1[I]
matched2 = cat2[J]
matched1.type = np.array([t.strip() for t in matched1.type])
matched2.type = np.array([t.strip() for t in matched2.type])
# Confusion matrix for source types
types = ['PSF', 'SIMP', 'EXP', 'DEV', 'COMP']
confusion = np.zeros((len(types), len(types)))
labels = []
assert(len(set(np.unique(matched1.type)) - set(types)) == 0)
assert(len(set(np.unique(matched2.type)) - set(types)) == 0)
for i,t1 in enumerate(types):
I = np.flatnonzero(matched1.type == t1)
if len(I) == 0:
continue
for j,t2 in enumerate(types):
J = np.flatnonzero(matched2.type[I] == t2)
if len(J) == 0:
continue
confusion[i, j] = float(len(J)) / float(len(I))
labels.append((i, j, '%i/%i' % (len(J), len(I))))
plt.clf()
plt.imshow(confusion, interpolation='nearest', cmap=plt.cm.Blues, vmin=0, vmax=1)
for r,c,s in labels:
plt.text(c, r, s, color='k', ha='center', fontsize=8)
plt.xticks(range(len(types)), types)
plt.yticks(range(len(types)), types)
plt.ylabel(name1)
plt.xlabel(name2)
ps.savefig()
plt.clf()
I = np.flatnonzero((matched1.type == 'PSF') * (matched2.type == 'PSF'))
print(len(I), 'PSF to PSF')
plt.plot(matched1.dchisq[I,0] - matched1.dchisq[I,1],
matched2.dchisq[I,0] - matched2.dchisq[I,1], 'k.', label='PSF to PSF')
I = np.flatnonzero((matched1.type == 'PSF') * (matched2.type == 'SIMP'))
print(len(I), 'PSF to SIMP')
plt.plot(matched1.dchisq[I,0] - matched1.dchisq[I,1],
matched2.dchisq[I,0] - matched2.dchisq[I,1], 'r.', label='PSF to SIMP')
I = np.flatnonzero((matched1.type == 'SIMP') * (matched2.type == 'PSF'))
print(len(I), 'SIMP to PSF')
plt.plot(matched1.dchisq[I,0] - matched1.dchisq[I,1],
matched2.dchisq[I,0] - matched2.dchisq[I,1], 'g.', label='SIMP to PSF')
I = np.flatnonzero((matched1.type == 'SIMP') * (matched2.type == 'SIMP'))
print(len(I), 'SIMP to SIMP')
plt.plot(matched1.dchisq[I,0] - matched1.dchisq[I,1],
matched2.dchisq[I,0] - matched2.dchisq[I,1], 'b.', label='SIMP to SIMP')
plt.xlabel('%s dchisq: PSF - SIMP' % name1)
plt.ylabel('%s dchisq: PSF - SIMP' % name2)
plt.legend(loc='upper left')
#plt.xscale('symlog')
#plt.yscale('symlog')
plt.plot([-20,20],[-20,20], 'k-', alpha=0.5)
plt.axhline(0, color='k', alpha=0.5)
plt.axvline(0, color='k', alpha=0.5)
plt.axis([-20,20,-20,20])
ps.savefig()
plt.clf()
I = np.flatnonzero((matched1.type == 'EXP') * (matched2.type == 'EXP'))
plt.plot(matched1.shapeexp_r[I], matched2.shapeexp_r[I], 'r.', label='exp')
I = np.flatnonzero((matched1.type == 'DEV') * (matched2.type == 'DEV'))
plt.plot(matched1.shapedev_r[I], matched2.shapedev_r[I], 'b.', label='dev')
plt.xlabel('%s radius (arcsec)' % name1)
plt.ylabel('%s radius (arcsec)' % name2)
plt.axis([0,4,0,4])
plt.legend()
ps.savefig()
for iband,band,cc in [(1,'g','g'),(2,'r','r'),(4,'z','m')]:
K = np.flatnonzero((matched1.decam_flux_ivar[:,iband] > 0) *
(matched2.decam_flux_ivar[:,iband] > 0))
print('Median mw_trans', band, 'is',
np.median(matched1.decam_mw_transmission[:,iband]))
plt.clf()
plt.errorbar(matched1.decam_flux[K,iband],
matched2.decam_flux[K,iband],
fmt='.', color=cc,
xerr=1./np.sqrt(matched1.decam_flux_ivar[K,iband]),
yerr=1./np.sqrt(matched2.decam_flux_ivar[K,iband]),
alpha=0.1,
)
plt.xlabel('%s flux: %s' % (name1, band))
plt.ylabel('%s flux: %s' % (name2, band))
plt.plot([-1e6, 1e6], [-1e6,1e6], 'k-', alpha=1.)
plt.axis([-100, 1000, -100, 1000])
plt.title(tt)
ps.savefig()
for iband,band,cc in [(1,'g','g'),(2,'r','r'),(4,'z','m')]:
good = ((matched1.decam_flux_ivar[:,iband] > 0) *
(matched2.decam_flux_ivar[:,iband] > 0))
K = np.flatnonzero(good)
        # note: type values were stripped of whitespace above, so compare 'PSF'
        psf1 = (matched1.type == 'PSF')
        psf2 = (matched2.type == 'PSF')
P = np.flatnonzero(good * psf1 * psf2)
mag1, magerr1 = NanoMaggies.fluxErrorsToMagErrors(
matched1.decam_flux[:,iband], matched1.decam_flux_ivar[:,iband])
iv1 = matched1.decam_flux_ivar[:, iband]
iv2 = matched2.decam_flux_ivar[:, iband]
std = np.sqrt(1./iv1 + 1./iv2)
plt.clf()
plt.plot(mag1[K],
(matched2.decam_flux[K,iband] - matched1.decam_flux[K,iband]) / std[K],
'.', alpha=0.1, color=cc)
plt.plot(mag1[P],
(matched2.decam_flux[P,iband] - matched1.decam_flux[P,iband]) / std[P],
'.', alpha=0.1, color='k')
plt.ylabel('(%s - %s) flux / flux errors (sigma): %s' % (name2, name1, band))
plt.xlabel('%s mag: %s' % (name1, band))
plt.axhline(0, color='k', alpha=0.5)
plt.axis([24, 16, -10, 10])
plt.title(tt)
ps.savefig()
plt.clf()
lp,lt = [],[]
for iband,band,cc in [(1,'g','g'),(2,'r','r'),(4,'z','m')]:
good = ((matched1.decam_flux_ivar[:,iband] > 0) *
(matched2.decam_flux_ivar[:,iband] > 0))
#good = True
        psf1 = (matched1.type == 'PSF')
        psf2 = (matched2.type == 'PSF')
mag1, magerr1 = NanoMaggies.fluxErrorsToMagErrors(
matched1.decam_flux[:,iband], matched1.decam_flux_ivar[:,iband])
iv1 = matched1.decam_flux_ivar[:, iband]
iv2 = matched2.decam_flux_ivar[:, iband]
std = np.sqrt(1./iv1 + 1./iv2)
#std = np.hypot(std, 0.01)
G = np.flatnonzero(good * psf1 * psf2 *
np.isfinite(mag1) *
(mag1 >= 20) * (mag1 < dict(g=24, r=23.5, z=22.5)[band]))
n,b,p = plt.hist((matched2.decam_flux[G,iband] -
matched1.decam_flux[G,iband]) / std[G],
range=(-4, 4), bins=50, histtype='step', color=cc,
normed=True)
sig = (matched2.decam_flux[G,iband] -
matched1.decam_flux[G,iband]) / std[G]
print('Raw mean and std of points:', np.mean(sig), np.std(sig))
med = np.median(sig)
rsigma = (np.percentile(sig, 84) - np.percentile(sig, 16)) / 2.
print('Median and percentile-based sigma:', med, rsigma)
lp.append(p[0])
lt.append('%s: %.2f +- %.2f' % (band, med, rsigma))
bins = []
gaussint = []
for blo,bhi in zip(b, b[1:]):
c = scipy.stats.norm.cdf(bhi) - scipy.stats.norm.cdf(blo)
c /= (bhi - blo)
#bins.extend([blo,bhi])
#gaussint.extend([c,c])
bins.append((blo+bhi)/2.)
gaussint.append(c)
plt.plot(bins, gaussint, 'k-', lw=2, alpha=0.5)
plt.title(tt)
plt.xlabel('Flux difference / error (sigma)')
plt.axvline(0, color='k', alpha=0.1)
plt.ylim(0, 0.45)
plt.legend(lp, lt, loc='upper right')
ps.savefig()
for iband,band,cc in [(1,'g','g'),(2,'r','r'),(4,'z','m')]:
plt.clf()
mag1, magerr1 = NanoMaggies.fluxErrorsToMagErrors(
matched1.decam_flux[:,iband], matched1.decam_flux_ivar[:,iband])
mag2, magerr2 = NanoMaggies.fluxErrorsToMagErrors(
matched2.decam_flux[:,iband], matched2.decam_flux_ivar[:,iband])
meanmag = NanoMaggies.nanomaggiesToMag((
matched1.decam_flux[:,iband] + matched2.decam_flux[:,iband]) / 2.)
        psf1 = (matched1.type == 'PSF')
        psf2 = (matched2.type == 'PSF')
good = ((matched1.decam_flux_ivar[:,iband] > 0) *
(matched2.decam_flux_ivar[:,iband] > 0) *
np.isfinite(mag1) * np.isfinite(mag2))
K = np.flatnonzero(good)
P = np.flatnonzero(good * psf1 * psf2)
plt.errorbar(mag1[K], mag2[K], fmt='.', color=cc,
xerr=magerr1[K], yerr=magerr2[K], alpha=0.1)
plt.plot(mag1[P], mag2[P], 'k.', alpha=0.5)
plt.xlabel('%s %s (mag)' % (name1, band))
plt.ylabel('%s %s (mag)' % (name2, band))
plt.plot([-1e6, 1e6], [-1e6,1e6], 'k-', alpha=1.)
plt.axis([24, 16, 24, 16])
plt.title(tt)
ps.savefig()
plt.clf()
plt.errorbar(mag1[K], mag2[K] - mag1[K], fmt='.', color=cc,
xerr=magerr1[K], yerr=magerr2[K], alpha=0.1)
plt.plot(mag1[P], mag2[P] - mag1[P], 'k.', alpha=0.5)
plt.xlabel('%s %s (mag)' % (name1, band))
plt.ylabel('%s %s - %s %s (mag)' % (name2, band, name1, band))
plt.axhline(0., color='k', alpha=1.)
plt.axis([24, 16, -1, 1])
plt.title(tt)
ps.savefig()
magbins = np.arange(16, 24.001, 0.5)
plt.clf()
plt.plot(mag1[K], (mag2[K]-mag1[K]) / np.hypot(magerr1[K], magerr2[K]),
'.', color=cc, alpha=0.1)
plt.plot(mag1[P], (mag2[P]-mag1[P]) / np.hypot(magerr1[P], magerr2[P]),
'k.', alpha=0.5)
plt.xlabel('%s %s (mag)' % (name1, band))
plt.ylabel('(%s %s - %s %s) / errors (sigma)' %
(name2, band, name1, band))
plt.axhline(0., color='k', alpha=1.)
plt.axis([24, 16, -10, 10])
plt.title(tt)
ps.savefig()
y = (mag2 - mag1) / np.hypot(magerr1, magerr2)
plt.clf()
plt.plot(meanmag[P], y[P], 'k.', alpha=0.1)
midmag = []
vals = np.zeros((len(magbins)-1, 5))
median_err1 = []
iqd_gauss = scipy.stats.norm.ppf(0.75) - scipy.stats.norm.ppf(0.25)
# FIXME -- should we do some stats after taking off the mean difference?
for bini,(mlo,mhi) in enumerate(zip(magbins, magbins[1:])):
I = P[(meanmag[P] >= mlo) * (meanmag[P] < mhi)]
midmag.append((mlo+mhi)/2.)
median_err1.append(np.median(magerr1[I]))
if len(I) == 0:
continue
# median and +- 1 sigma quantiles
ybin = y[I]
vals[bini,0] = np.percentile(ybin, 16)
vals[bini,1] = np.median(ybin)
vals[bini,2] = np.percentile(ybin, 84)
# +- 2 sigma quantiles
vals[bini,3] = np.percentile(ybin, 2.3)
vals[bini,4] = np.percentile(ybin, 97.7)
iqd = np.percentile(ybin, 75) - np.percentile(ybin, 25)
print('Mag bin', midmag[-1], ': IQD is factor', iqd / iqd_gauss,
'vs expected for Gaussian;', len(ybin), 'points')
# if iqd > iqd_gauss:
# # What error adding in quadrature would you need to make the IQD match?
# err = median_err1[-1]
# target_err = err * (iqd / iqd_gauss)
# sys_err = np.sqrt(target_err**2 - err**2)
# print('--> add systematic error', sys_err)
# ~ Johan's cuts
mlo = 21.
mhi = dict(g=24., r=23.5, z=22.5)[band]
I = P[(meanmag[P] >= mlo) * (meanmag[P] < mhi)]
ybin = y[I]
iqd = np.percentile(ybin, 75) - np.percentile(ybin, 25)
print('Mag bin', mlo, mhi, 'band', band, ': IQD is factor',
iqd / iqd_gauss, 'vs expected for Gaussian;', len(ybin), 'points')
if iqd > iqd_gauss:
# What error adding in quadrature would you need to make
# the IQD match?
err = np.median(np.hypot(magerr1[I], magerr2[I]))
print('Median error (hypot):', err)
target_err = err * (iqd / iqd_gauss)
print('Target:', target_err)
sys_err = np.sqrt((target_err**2 - err**2) / 2.)
print('--> add systematic error', sys_err)
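            # Editorial note (not upstream code): if the same systematic error
            # sys_err is added in quadrature to both catalogs, the combined
            # error becomes sqrt(err1^2 + sys_err^2 + err2^2 + sys_err^2), so
            # inflating the median combined error `err` to `target_err`
            # requires sys_err = sqrt((target_err^2 - err^2) / 2).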
# check...
err_sys = np.hypot(np.hypot(magerr1, sys_err),
np.hypot(magerr2, sys_err))
ysys = (mag2 - mag1) / err_sys
ysys = ysys[I]
print('Resulting median error:', np.median(err_sys[I]))
iqd_sys = np.percentile(ysys, 75) - np.percentile(ysys, 25)
print('--> IQD', iqd_sys / iqd_gauss, 'vs Gaussian')
# Hmmm, this doesn't work... totally overshoots.
plt.errorbar(midmag, vals[:,1], fmt='o', color='b',
yerr=(vals[:,1]-vals[:,0], vals[:,2]-vals[:,1]),
capthick=3, zorder=20)
plt.errorbar(midmag, vals[:,1], fmt='o', color='b',
yerr=(vals[:,1]-vals[:,3], vals[:,4]-vals[:,1]),
capthick=2, zorder=20)
plt.axhline( 1., color='b', alpha=0.2)
plt.axhline(-1., color='b', alpha=0.2)
plt.axhline( 2., color='b', alpha=0.2)
plt.axhline(-2., color='b', alpha=0.2)
for mag,err,y in zip(midmag, median_err1, vals[:,3]):
if not np.isfinite(err):
continue
if y < -6:
continue
plt.text(mag, y-0.1, '%.3f' % err, va='top', ha='center', color='k',
fontsize=10)
plt.xlabel('(%s + %s)/2 %s (mag), PSFs' % (name1, name2, band))
plt.ylabel('(%s %s - %s %s) / errors (sigma)' %
(name2, band, name1, band))
plt.axhline(0., color='k', alpha=1.)
plt.axvline(21, color='k', alpha=0.3)
plt.axvline(dict(g=24, r=23.5, z=22.5)[band], color='k', alpha=0.3)
plt.axis([24.1, 16, -6, 6])
plt.title(tt)
ps.savefig()
#magbins = np.append([16, 18], np.arange(20, 24.001, 0.5))
if band == 'g':
magbins = [20, 24]
elif band == 'r':
magbins = [20, 23.5]
elif band == 'z':
magbins = [20, 22.5]
slo,shi = -5,5
plt.clf()
ha = dict(bins=25, range=(slo,shi), histtype='step', normed=True)
y = (mag2 - mag1) / np.hypot(magerr1, magerr2)
midmag = []
nn = []
rgbs = []
lt,lp = [],[]
for bini,(mlo,mhi) in enumerate(zip(magbins, magbins[1:])):
I = P[(mag1[P] >= mlo) * (mag1[P] < mhi)]
if len(I) == 0:
continue
ybin = y[I]
rgb = [0.,0.,0.]
rgb[0] = float(bini) / (len(magbins)-1)
rgb[2] = 1. - rgb[0]
n,b,p = plt.hist(ybin, color=rgb, **ha)
lt.append('mag %g to %g' % (mlo,mhi))
lp.append(p[0])
midmag.append((mlo+mhi)/2.)
nn.append(n)
rgbs.append(rgb)
bins = []
gaussint = []
for blo,bhi in zip(b, b[1:]):
#midbin.append((blo+bhi)/2.)
#gaussint.append(scipy.stats.norm.cdf(bhi) -
# scipy.stats.norm.cdf(blo))
c = scipy.stats.norm.cdf(bhi) - scipy.stats.norm.cdf(blo)
c /= (bhi - blo)
bins.extend([blo,bhi])
gaussint.extend([c,c])
plt.plot(bins, gaussint, 'k-', lw=2, alpha=0.5)
plt.legend(lp, lt)
plt.title(tt)
plt.xlim(slo,shi)
ps.savefig()
bincenters = b[:-1] + (b[1]-b[0])/2.
plt.clf()
lp = []
for n,rgb,mlo,mhi in zip(nn, rgbs, magbins, magbins[1:]):
p = plt.plot(bincenters, n, '-', color=rgb)
lp.append(p[0])
plt.plot(bincenters, gaussint[::2], 'k-', alpha=0.5, lw=2)
plt.legend(lp, lt)
plt.title(tt)
plt.xlim(slo,shi)
ps.savefig()
if __name__ == '__main__':
main()
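# Editorial usage note (not upstream code): the script is meant to be run from
# the command line; a hedged example invocation (directory names hypothetical):
#
#   python compare-two-catalogs.py --name1 run1 --name2 run2 \
#       --plot-prefix compare --match 1.0 /path/to/tractor1 /path/to/tractor2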
| gpl-2.0 |
xuewei4d/scikit-learn | sklearn/metrics/_regression.py | 6 | 31872 | """Metrics to assess performance on regression task.
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better.
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Karan Desai <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# Christian Lorentzen <[email protected]>
# Ashutosh Hathidara <[email protected]>
# License: BSD 3 clause
import numpy as np
import warnings
from .._loss.glm_distribution import TweedieDistribution
from ..utils.validation import (check_array, check_consistent_length,
_num_samples)
from ..utils.validation import column_or_1d
from ..utils.validation import _deprecate_positional_args
from ..utils.validation import _check_sample_weight
from ..utils.stats import _weighted_percentile
from ..exceptions import UndefinedMetricWarning
__ALL__ = [
"max_error",
"mean_absolute_error",
"mean_squared_error",
"mean_squared_log_error",
"median_absolute_error",
"mean_absolute_percentage_error",
"r2_score",
"explained_variance_score",
"mean_tweedie_deviance",
"mean_poisson_deviance",
"mean_gamma_deviance",
]
def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"):
"""Check that y_true and y_pred belong to the same regression task.
Parameters
----------
y_true : array-like
y_pred : array-like
    multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'.
y_true : array-like of shape (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
dtype : str or list, default="numeric"
the dtype argument passed to check_array.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False, dtype=dtype)
y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError("y_true and y_pred have different number of outputs "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
allowed_multioutput_str = ('raw_values', 'uniform_average',
'variance_weighted')
if isinstance(multioutput, str):
if multioutput not in allowed_multioutput_str:
raise ValueError("Allowed 'multioutput' string values are {}. "
"You provided multioutput={!r}".format(
allowed_multioutput_str,
multioutput))
elif multioutput is not None:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
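# Editorial note (not upstream code): _check_reg_targets reshapes 1-d targets
# to column vectors; e.g. for y_true=[3, -0.5, 2, 7] and y_pred=[2.5, 0, 2, 8]
# it returns ('continuous', arrays of shape (4, 1), and the multioutput
# argument unchanged).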
@_deprecate_positional_args
def mean_absolute_error(y_true, y_pred, *,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss.
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
0.85...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
check_consistent_length(y_true, y_pred, sample_weight)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if isinstance(multioutput, str):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_absolute_percentage_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute percentage error regression loss.
Note here that we do not represent the output as a percentage in range
[0, 100]. Instead, we represent it in range [0, 1/eps]. Read more in the
:ref:`User Guide <mean_absolute_percentage_error>`.
.. versionadded:: 0.24
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average'} or array-like
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
If input is list then the shape must be (n_outputs,).
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats in the range [0, 1/eps]
If multioutput is 'raw_values', then mean absolute percentage error
is returned for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAPE output is non-negative floating point. The best value is 0.0.
        But note that bad predictions can lead to arbitrarily large
MAPE values, especially if some y_true values are very close to zero.
Note that we return a large value instead of `inf` when y_true is zero.
Examples
--------
>>> from sklearn.metrics import mean_absolute_percentage_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_percentage_error(y_true, y_pred)
0.3273...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_percentage_error(y_true, y_pred)
0.5515...
>>> mean_absolute_percentage_error(y_true, y_pred, multioutput=[0.3, 0.7])
0.6198...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
check_consistent_length(y_true, y_pred, sample_weight)
epsilon = np.finfo(np.float64).eps
mape = np.abs(y_pred - y_true) / np.maximum(np.abs(y_true), epsilon)
output_errors = np.average(mape,
weights=sample_weight, axis=0)
if isinstance(multioutput, str):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
@_deprecate_positional_args
def mean_squared_error(y_true, y_pred, *,
sample_weight=None,
multioutput='uniform_average', squared=True):
"""Mean squared error regression loss.
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE value.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred, squared=False)
0.612...
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred)
0.708...
>>> mean_squared_error(y_true, y_pred, squared=False)
0.822...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
array([0.41666667, 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
0.825...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
check_consistent_length(y_true, y_pred, sample_weight)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if not squared:
output_errors = np.sqrt(output_errors)
if isinstance(multioutput, str):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
@_deprecate_positional_args
def mean_squared_log_error(y_true, y_pred, *,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared logarithmic error regression loss.
Read more in the :ref:`User Guide <mean_squared_log_error>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors when the input is of multioutput
format.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_log_error
>>> y_true = [3, 5, 2.5, 7]
>>> y_pred = [2.5, 5, 4, 8]
>>> mean_squared_log_error(y_true, y_pred)
0.039...
>>> y_true = [[0.5, 1], [1, 2], [7, 6]]
>>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]]
>>> mean_squared_log_error(y_true, y_pred)
0.044...
>>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
array([0.00462428, 0.08377444])
>>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
0.060...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
check_consistent_length(y_true, y_pred, sample_weight)
if (y_true < 0).any() or (y_pred < 0).any():
raise ValueError("Mean Squared Logarithmic Error cannot be used when "
"targets contain negative values.")
return mean_squared_error(np.log1p(y_true), np.log1p(y_pred),
sample_weight=sample_weight,
multioutput=multioutput)
@_deprecate_positional_args
def median_absolute_error(y_true, y_pred, *, multioutput='uniform_average',
sample_weight=None):
"""Median absolute error regression loss.
Median absolute error output is non-negative floating point. The best value
is 0.0. Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values. Array-like value defines
weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 0.24
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> median_absolute_error(y_true, y_pred)
0.75
>>> median_absolute_error(y_true, y_pred, multioutput='raw_values')
array([0.5, 1. ])
>>> median_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
0.85
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is None:
output_errors = np.median(np.abs(y_pred - y_true), axis=0)
else:
sample_weight = _check_sample_weight(sample_weight, y_pred)
output_errors = _weighted_percentile(np.abs(y_pred - y_true),
sample_weight=sample_weight)
if isinstance(multioutput, str):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
@_deprecate_positional_args
def explained_variance_score(y_true, y_pred, *,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function.
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average', 'variance_weighted'} or \
array-like of shape (n_outputs,), default='uniform_average'
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred)
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
check_consistent_length(y_true, y_pred, sample_weight)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, str):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights to np.average results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
@_deprecate_positional_args
def r2_score(y_true, y_pred, *, sample_weight=None,
multioutput="uniform_average"):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average', 'variance_weighted'}, \
array-like of shape (n_outputs,) or None, default='uniform_average'
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default is "uniform_average".
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
.. versionchanged:: 0.19
Default value of multioutput is 'uniform_average'.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
This metric is not well-defined for single samples and will return a NaN
value if n_samples is less than two.
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred)
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred,
... multioutput='variance_weighted')
0.938...
>>> y_true = [1, 2, 3]
>>> y_pred = [1, 2, 3]
>>> r2_score(y_true, y_pred)
1.0
>>> y_true = [1, 2, 3]
>>> y_pred = [2, 2, 2]
>>> r2_score(y_true, y_pred)
0.0
>>> y_true = [1, 2, 3]
>>> y_pred = [3, 2, 1]
>>> r2_score(y_true, y_pred)
-3.0
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
check_consistent_length(y_true, y_pred, sample_weight)
if _num_samples(y_pred) < 2:
msg = "R^2 score is not well-defined with less than two samples."
warnings.warn(msg, UndefinedMetricWarning)
return float('nan')
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, str):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def max_error(y_true, y_pred):
"""
max_error metric calculates the maximum residual error.
Read more in the :ref:`User Guide <max_error>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated target values.
Returns
-------
max_error : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import max_error
>>> y_true = [3, 2, 7, 1]
>>> y_pred = [4, 2, 7, 1]
>>> max_error(y_true, y_pred)
1
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred, None)
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in max_error")
return np.max(np.abs(y_true - y_pred))
@_deprecate_positional_args
def mean_tweedie_deviance(y_true, y_pred, *, sample_weight=None, power=0):
"""Mean Tweedie deviance regression loss.
Read more in the :ref:`User Guide <mean_tweedie_deviance>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
power : float, default=0
Tweedie power parameter. Either power <= 0 or power >= 1.
        The higher `power` the less weight is given to extreme
deviations between true and predicted targets.
- power < 0: Extreme stable distribution. Requires: y_pred > 0.
- power = 0 : Normal distribution, output corresponds to
mean_squared_error. y_true and y_pred can be any real numbers.
- power = 1 : Poisson distribution. Requires: y_true >= 0 and
y_pred > 0.
        - 1 < power < 2 : Compound Poisson distribution. Requires: y_true >= 0
and y_pred > 0.
- power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.
- power = 3 : Inverse Gaussian distribution. Requires: y_true > 0
and y_pred > 0.
- otherwise : Positive stable distribution. Requires: y_true > 0
and y_pred > 0.
Returns
-------
loss : float
A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_tweedie_deviance
>>> y_true = [2, 0, 1, 4]
>>> y_pred = [0.5, 0.5, 2., 2.]
>>> mean_tweedie_deviance(y_true, y_pred, power=1)
1.4260...
"""
y_type, y_true, y_pred, _ = _check_reg_targets(
y_true, y_pred, None, dtype=[np.float64, np.float32])
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in mean_tweedie_deviance")
check_consistent_length(y_true, y_pred, sample_weight)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
sample_weight = sample_weight[:, np.newaxis]
dist = TweedieDistribution(power=power)
dev = dist.unit_deviance(y_true, y_pred, check_input=True)
return np.average(dev, weights=sample_weight)
@_deprecate_positional_args
def mean_poisson_deviance(y_true, y_pred, *, sample_weight=None):
"""Mean Poisson deviance regression loss.
Poisson deviance is equivalent to the Tweedie deviance with
the power parameter `power=1`.
Read more in the :ref:`User Guide <mean_tweedie_deviance>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values. Requires y_true >= 0.
y_pred : array-like of shape (n_samples,)
Estimated target values. Requires y_pred > 0.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float
A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_poisson_deviance
>>> y_true = [2, 0, 1, 4]
>>> y_pred = [0.5, 0.5, 2., 2.]
>>> mean_poisson_deviance(y_true, y_pred)
1.4260...
"""
return mean_tweedie_deviance(
y_true, y_pred, sample_weight=sample_weight, power=1
)
@_deprecate_positional_args
def mean_gamma_deviance(y_true, y_pred, *, sample_weight=None):
"""Mean Gamma deviance regression loss.
Gamma deviance is equivalent to the Tweedie deviance with
the power parameter `power=2`. It is invariant to scaling of
the target variable, and measures relative errors.
Read more in the :ref:`User Guide <mean_tweedie_deviance>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values. Requires y_true > 0.
y_pred : array-like of shape (n_samples,)
Estimated target values. Requires y_pred > 0.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float
A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_gamma_deviance
>>> y_true = [2, 0.5, 1, 4]
>>> y_pred = [0.5, 0.5, 2., 2.]
>>> mean_gamma_deviance(y_true, y_pred)
1.0568...
"""
return mean_tweedie_deviance(
y_true, y_pred, sample_weight=sample_weight, power=2
)
| bsd-3-clause |
coolshaker/taxi_project | source_censustract_pickup/mapper.py | 2 | 2224 | #!/usr/bin/env python
import sys
sys.path.append('.')
import matplotlib
matplotlib.use('Agg')
from matplotlib.path import Path
from rtree import index as rtree
import numpy, shapefile, time
def findNeighborhood(location, index, neighborhoods):
match = index.intersection((location[0], location[1], location[0], location[1]))
for a in match:
if any(map(lambda x: x.contains_point(location), neighborhoods[a][1])):
return a
return -1
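# Illustrative usage sketch (not from the original job): findNeighborhood
# expects an rtree index of bounding boxes plus a parallel list of
# (name, [Path, ...]) tuples.  The unit square below is a made-up polygon and
# relies on the module-level Path/rtree/numpy imports above.
def _example_findNeighborhood():
    square = Path(numpy.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]]))
    neighborhoods = [('toy_tract', [square])]
    idx = rtree.Index()
    # bounding boxes are inserted as (minx, miny, maxx, maxy)
    idx.insert(0, (0., 0., 1., 1.))
    assert findNeighborhood((0.5, 0.5), idx, neighborhoods) == 0
    assert findNeighborhood((2.0, 2.0), idx, neighborhoods) == -1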
def readNeighborhood(shapeFilename, index, neighborhoods):
sf = shapefile.Reader(shapeFilename)
for sr in sf.shapeRecords():
# if sr.record[1] not in ['New York', 'Kings', 'Queens', 'Bronx']: continue
paths = map(Path, numpy.split(sr.shape.points, sr.shape.parts[1:]))
bbox = paths[0].get_extents()
map(bbox.update_from_path, paths[1:])
index.insert(len(neighborhoods), list(bbox.get_points()[0])+list(bbox.get_points()[1]))
neighborhoods.append((sr.record[3], paths))
neighborhoods.append(('UNKNOWN', None))
def parseInput():
for line in sys.stdin:
line = line.strip('\n')
values = line.split(',')
if len(values)>1 and values[0]!='medallion':
yield values
def mapper():
lng_id = 10 #pickup_lon
lat_id = 11 #pickup_lat
pickup_datetime = 5
index = rtree.Index()
neighborhoods = []
readNeighborhood('NYC_Census_Tract.shp', index, neighborhoods)
agg = {}
for values in parseInput():
try:
pickup_location = (float(values[lng_id]), float(values[lat_id]))
pickup_neighborhood = findNeighborhood(pickup_location, index, neighborhoods)
if pickup_neighborhood!=-1:
                pickup_time = time.strptime(values[pickup_datetime], '%Y-%m-%d %H:%M:%S')
year_month = str(pickup_time.tm_year) + ('%02d' % pickup_time.tm_mon)
geoid = neighborhoods[pickup_neighborhood][0]
# print "%s\t%s" %( geoid, year_month)
key = "%s,%s" %( geoid, year_month)
agg[key] = agg.get(key, 0) + 1
except:
pass
for item in agg.iteritems():
print '%s\t%s' % item
if __name__=='__main__':
mapper()
| mit |
meduz/scikit-learn | sklearn/feature_selection/variance_threshold.py | 123 | 2572 | # Author: Lars Buitinck
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
MJuddBooth/pandas | pandas/plotting/_style.py | 2 | 5763 | # being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
from contextlib import contextmanager
import warnings
import numpy as np
import pandas.compat as compat
from pandas.compat import lmap, lrange
from pandas.core.dtypes.common import is_list_like
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
raise ValueError("Colormap {0} is not recognized".format(cmap))
colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
elif color is not None:
if colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
colors = list(color) if is_list_like(color) else color
else:
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
try:
colors = [c['color']
for c in list(plt.rcParams['axes.prop_cycle'])]
except KeyError:
colors = list(plt.rcParams.get('axes.color_cycle',
list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
colors = colors[0:num_colors]
elif color_type == 'random':
import pandas.core.common as com
def random_color(column):
""" Returns a random color represented as a list of length 3"""
# GH17525 use common._random_state to avoid resetting the seed
rs = com.random_state(column)
return rs.rand(3).tolist()
colors = lmap(random_color, lrange(num_colors))
else:
raise ValueError("color_type must be either 'default' or 'random'")
if isinstance(colors, compat.string_types):
import matplotlib.colors
conv = matplotlib.colors.ColorConverter()
def _maybe_valid_colors(colors):
try:
[conv.to_rgba(c) for c in colors]
return True
except ValueError:
return False
        # check whether the string can be converted to a single color
maybe_single_color = _maybe_valid_colors([colors])
        # check whether each character can be converted to a color
maybe_color_cycle = _maybe_valid_colors(list(colors))
if maybe_single_color and maybe_color_cycle and len(colors) > 1:
hex_color = [c['color']
for c in list(plt.rcParams['axes.prop_cycle'])]
colors = [hex_color[int(colors[1])]]
elif maybe_single_color:
colors = [colors]
else:
# ``colors`` is regarded as color cycle.
# mpl will raise error any of them is invalid
pass
    # Append more colors by cycling if there are not enough colors.
# Extra colors will be ignored by matplotlib if there are more colors
# than needed and nothing needs to be done here.
if len(colors) < num_colors:
try:
multiple = num_colors // len(colors) - 1
except ZeroDivisionError:
raise ValueError("Invalid color argument: ''")
mod = num_colors % len(colors)
colors += multiple * colors
colors += colors[:mod]
return colors
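# Illustrative usage sketch (not part of the original pandas module); the
# colormap name 'viridis' is only an example.
def _example_get_standard_colors():
    # three colors sampled evenly from a named matplotlib colormap
    from_cmap = _get_standard_colors(num_colors=3, colormap='viridis')
    # default property cycle, cycled if more colors are requested than exist
    from_cycle = _get_standard_colors(num_colors=12, color_type='default')
    return from_cmap, from_cycle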
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
    the same as the plot function parameters, but stores them in a canonical
    format that makes it easy to break them down into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self, deprecated=False):
self._deprecated = deprecated
# self['xaxis.compat'] = False
super(_Options, self).__setitem__('xaxis.compat', False)
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError(
'{key} is not a valid pandas plotting option'.format(key=key))
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError(
'Cannot remove default parameter {key}'.format(key=key))
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
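# Illustrative usage sketch (not part of the original module): the options
# store is normally used through the module-level ``plot_params`` singleton,
# with ``use`` restoring the previous value on exit.
def _example_plot_params_use():
    # 'x_compat' is an alias for the canonical key 'xaxis.compat'
    with plot_params.use('x_compat', True):
        assert plot_params['xaxis.compat'] is True
    assert plot_params['xaxis.compat'] is False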
| bsd-3-clause |
prheenan/Research | Personal/EventDetection/Util/Analysis.py | 1 | 31350 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys,warnings,copy
from scipy import interpolate
from scipy.stats import norm
from scipy.ndimage.filters import uniform_filter1d,generic_filter1d
from scipy.integrate import cumtrapz
class simple_fec:
def __init__(self,time,z_sensor,separation,force,trigger_time,
dwell_time,events=[]):
self.Time = time
self.ZSnsr = z_sensor
self.Separation = separation
self.Force = force
self.TriggerTime = trigger_time
self.DwellTime = dwell_time
self.Events = events
class split_force_extension:
"""
class representing a force-extension curve, split into approach, dwell,
and retract
"""
def __init__(self,approach,dwell,retract,tau_num_points=None):
self.approach = approach
self.dwell = dwell
self.retract = retract
self.set_tau_num_points(tau_num_points)
self.retract_knots = None
self.epsilon = None
self.sigma = None
def set_retract_knots(self,interpolator):
"""
sets the retract knots; useful for gridding data
"""
self.retract_knots = interpolator.get_knots()
def get_epsilon_and_sigma(self):
return self.epsilon,self.sigma
def set_espilon_and_sigma(self,epsilon,sigma):
self.epsilon =epsilon
self.sigma = sigma
def set_approach_metrics(self,slice_to_fit,interpolator):
self.cached_approach_interpolator = interpolator
self.cached_approach_slice_to_fit = slice_to_fit
def _approach_metrics(self,n_points=None,slice_fit_approach=None):
if (n_points is None):
n_points = self.tau_num_points_approach
if (slice_fit_approach is None):
approach_surface_idx = self.get_predicted_approach_surface_index()
slice_fit_approach= slice(0,approach_surface_idx,1)
spline_fit_approach = \
self.approach_spline_interpolator(slice_to_fit=slice_fit_approach)
approach = self.approach
approach_time_fit = approach.Time[slice_fit_approach]
approach_force_sliced = approach.Force[slice_fit_approach]
approach_force_interp_sliced = spline_fit_approach(approach_time_fit)
# get the residual properties of the approach
stdevs,epsilon,sigma = \
stdevs_epsilon_sigma(approach_force_sliced,
approach_force_interp_sliced,n_points)
return stdevs,epsilon,sigma,slice_fit_approach,spline_fit_approach
def stdevs_epsilon_and_sigma(self,**kwargs):
stdevs,epsilon,sigma,slice_fit_approach,spline_fit_approach = \
self._approach_metrics(**kwargs)
return stdevs,epsilon,sigma
def retract_spline_interpolator(self,slice_to_fit=None,knots=None,**kwargs):
"""
returns an interpolator for force based on the stored time constant tau
        for the retract force versus time curve
Args:
slice_to_fit: which part of the retract to fit
knots: where to put the spline knots. if none, defaults to
self.retract_knots (which could also be none; then just uniform)
kwargs: passed to spline_interpolator
"""
if (slice_to_fit is None):
slice_to_fit = slice(0,self.retract.Time.size-1,1)
if knots is None:
knots = self.retract_knots
if (knots is not None):
# POST: actually have some knots; find the ones we can use
x = self.retract.Time
start = slice_to_fit.start
stop = slice_to_fit.stop
if stop is None:
stop = -1
condition = ((knots >= x[start]) & (knots <= x[stop]))
good_idx = np.where(condition)[0]
if (good_idx.size == 0):
err_str = "No valid knots! Analysis.retract_spline_interpolator"
warnings.warn(err_str, DeprecationWarning)
# give up on whatever we were trying to do
knots = None
else:
knots = knots[good_idx]
return spline_fit_fec(self.tau,self.retract,slice_to_fit=slice_to_fit,
knots=knots,**kwargs)
def approach_spline_interpolator(self,slice_to_fit=None,**kwargs):
"""
See retract_spline_interpolator, but for the approach
"""
tau_approach = self.tau_num_points_approach*self.dt
return spline_fit_fec(tau_approach,self.approach,
slice_to_fit=slice_to_fit,**kwargs)
def retract_separation_interpolator(self,**kwargs):
"""
returns an interpolator for separation based on the stored time
        constant tau for the retract force versus time curve
Args:
kwargs: passed to spline_interpolator
"""
x,f = self.retract.Time,self.retract.Separation
return spline_interpolator(self.tau,x,f,**kwargs)
def set_tau_num_points_approach(self,tau_num_points):
"""
sets the approach number of points for tau (may be different
due to different loading rates, etc)
Args:
tau_num_points: number of points to use
Returns:
nothing, sets tau appropriately
"""
self.tau_num_points_approach = tau_num_points
def set_tau_num_points(self,tau_num_points):
"""
sets the autocorrelation time associated with this curve
Args:
tau_num_points: integer number of points
Returns:
Nothing
"""
self.tau_num_points = tau_num_points
if (tau_num_points is not None):
# we assume the rate of time sampling is the same everywhere
self.dt = np.median(np.diff(self.approach.Time))
self.tau = self.dt*tau_num_points
else:
self.tau = None
def zero_retract_force(self,offset):
self.retract.Force -= offset
def zero_all(self,separation,zsnsr,force,force_retract):
"""
zeros the distance and force of the approach,dwell, and retract
Args:
separation,zsnsr,force: offsets in their respective categories
"""
self.approach.offset(separation,zsnsr,force)
self.dwell.offset(separation,zsnsr,force)
self.retract.offset(separation,zsnsr,force_retract)
def flip_forces(self):
"""
multiplies all the forces by -1; useful after offsetting
"""
self.approach.Force *= -1
self.dwell.Force *= -1
self.retract.Force *= -1
def n_points_approach_dwell(self):
"""
Returns:
the number of points in the approach and dwell curves
"""
return self.approach.Force.size + self.dwell.Force.size
def get_retract_event_idx(self):
"""
gets the slices of events *relative to the retract* (ie: idx 0 is
the first point in the retract curve)
Returns:
list, each element is a slice like (start,stop,1) where start and
stop are the event indices
"""
offset = self.n_points_approach_dwell()
# each event is a start/end tuple, so we just offset the min and max
idx = [ slice(min(ev)-offset,max(ev)-offset,1)
for ev in self.retract.Events]
return idx
def has_events(self):
return len(self.retract.Events) > 0
def get_retract_event_slices(self):
event_idx_retract = self.get_retract_event_centers()
starts = [0] + event_idx_retract
ends = event_idx_retract + [None]
slices = [slice(i,f,1) for i,f in zip(starts,ends)]
return slices
def get_retract_event_starts(self):
"""
get the start to all the events
"""
return [ i.start for i in self.get_retract_event_idx()]
def get_retract_event_centers(self):
"""
Returns:
the mean of the event start and stop (its 'center')
"""
get_mean = lambda ev: int(np.round(np.mean([ev.start,ev.stop]) ))
return [ get_mean(ev) for ev in self.get_retract_event_idx()]
def surface_distance_from_trigger(self):
"""
        returns the distance in separation units from the trigger point
"""
return abs(min(self.approach.Separation))
def get_predicted_approach_surface_index(self):
"""
returns the predicted place the surface is on the approach
"""
return np.where(self.approach.Force >0)[0][-1]
def get_predicted_retract_surface_index(self):
"""
Assuming this have been zeroed, get the predicted retract surface index
"""
approach_idx = self.get_predicted_approach_surface_index()
offset_points = self.approach.Force.size-approach_idx
# assume the surface is at the same point; convert from idx to real
# units by getting the ratio of separation differences
grad_appr = np.gradient(self.approach.Zsnsr)
grad_retr = np.gradient(self.retract.Zsnsr)
sep_diff_median_ratio = abs(np.median(grad_retr)/np.median(grad_appr))
ratio = int(np.ceil(offset_points/sep_diff_median_ratio))
"""
plt.subplot(2,1,1)
plt.plot(self.approach.Force)
plt.axvline(approach_idx)
plt.subplot(2,1,2)
n = self.approach.Force.size
plt.plot(self.retract.Force)
plt.axvline(n-approach_idx)
plt.axvline(ratio)
print(ratio)
plt.show()
"""
return ratio
def _index_surface_relative(x,offset_needed):
"""
returns a crude estimate for the predicted index offset for the surface
Args:
x: the time series of separation
offset_needed: the x offset
Returns:
number of points for x to displace by offset_needed
"""
sep_diff = np.median(np.abs(np.diff(x)))
n_points = int(np.ceil(offset_needed/sep_diff))
return n_points
def spline_fit_fec(tau,time_sep_force,slice_to_fit=None,**kwargs):
"""
returns an interpolator object on the given TimeSepForce object
Args:
tau: see spline_interpolator
        time_sep_force: get the time and force from this as x,y to
spline_interpolator
slice_to_fit: part of x,f to fit
**kwargs: passed to spline_interpolator
returns:
see spline_interpolator
"""
x,f = time_sep_force.Time,time_sep_force.Force
if (slice_to_fit is None):
slice_to_fit = slice(0,None,1)
return spline_interpolator(tau,x[slice_to_fit],f[slice_to_fit],
**kwargs)
def local_integral(y,n,mode='reflect'):
"""
gets the integral of y_i from -n to n (total of 2*n points)
Args:
y: to integrate
n: window size (in either direction)
mode: see cumtrapz
Returns:
array, same size as y, of the centered integral (edges are
clamped in integral centering)
"""
cumulative_integral = cumtrapz(y=y, dx=1.0, axis=-1, initial=0)
return local_centered_diff(cumulative_integral,n)
def local_centered_diff(y,n):
"""
    return the local centered difference y[i+n]-y[i-n], clamping the indices
    at the boundaries (ie 0 and y.size-1)
Args:
y: to get the centered diff of
n: the size of the window
Returns:
        array a, same size as y, where a[i] = y[min(i+n,y.size-1)]-y[max(i-n,0)]
"""
# get the 'initial' points. this is the first point for the first n,
# then the remainder of the array (eg: y[n] has an initial of y[0],
# y[n+1] has an initial of y[1], but y[0] has an initial of y[0]
yi = np.zeros(y.size)
yi[:n] = y[0]
yi[n:] = y[:-n]
# ibid, except the final points. y[0] gets y[n], y[n] gets y[n]
yf = np.zeros(y.size)
yf[-n:] = y[-1]
yf[:-n] = y[n:]
return yf-yi
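# Illustrative sketch (not part of the original module): on a unit ramp the
# centered difference is 2*n in the interior and smaller at the clamped edges.
def _example_local_centered_diff():
    y = np.arange(10, dtype=float)
    d = local_centered_diff(y, n=2)
    assert np.allclose(d[2:-2], 4.0)
    return d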
def local_average(f,n,size=None,origin=None,mode='reflect'):
"""
get the local, windowed function of the average, +/- n
Args:
f: what we want the stdev of
n: window size (in either direction)
mode: see uniform_filter1d
Returns:
array, same size as f, with the dat we want
"""
if (size is None):
size = 2*n
if (origin is None):
origin = 0
return uniform_filter1d(f, size=size, mode=mode, origin=origin)
def local_stdev(f,n):
"""
    Gets the local standard deviation (+/- n), except at boundaries
where it is just in the direction with data
Args:
f: what we want the stdev of
n: window size (in either direction)
Returns:
array, same size as f, with the dat we want
"""
max_n = f.size
# go from (i-n to i+n)
"""
for linear stdev, see:
stackoverflow.com/questions/18419871/
improving-code-efficiency-standard-deviation-on-sliding-windows
"""
mode = 'reflect'
c1 = local_average(f,n)
c2 = local_average(f*f,n)
# sigma^2 = ( <x^2> - <x>^2 )^(1/2), shouldnt dip below 0
safe_variance = np.maximum(0,c2 - c1*c1)
stdev = (safe_variance**.5)
return stdev
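# Illustrative sketch (not part of the original module): the windowed standard
# deviation of pure Gaussian noise should hover around the true sigma.
def _example_local_stdev():
    rng = np.random.RandomState(0)
    noise = rng.normal(scale=0.5, size=2000)
    stdevs = local_stdev(noise, n=50)
    assert abs(np.median(stdevs) - 0.5) < 0.1
    return stdevs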
def filter_fec(obj,n_points):
to_ret = copy.deepcopy(obj)
to_ret.Force = spline_interpolated_by_index(obj.Force,n_points)
to_ret.Separation = spline_interpolated_by_index(obj.Separation,n_points)
to_ret.ZSnsr = spline_interpolated_by_index(obj.ZSnsr,n_points)
return to_ret
def bc_coeffs_load_force_2d(loading_true,loading_pred,bins_load,
ruptures_true,ruptures_pred,bins_rupture):
"""
returns the bhattacharya coefficients for the distriutions of the loading
rate, rupture force, and 2-d version
Args:
<x>_<true/pred>: the list of <x> values that are ground truth or
predicted
bins<x>: for histogramming
Return:
three-tuple of BC coefficient (between 0 and 1)for the distributions of
loading rate, rupture force, and their 2-tuple
"""
coeff_load = bhattacharyya_probability_coefficient_1d(loading_true,
loading_pred,
bins_load)
coeff_force = bhattacharyya_probability_coefficient_1d(ruptures_true,
ruptures_pred,
bins_rupture)
# do a 2-d coefficient
tuple_true = [loading_true,ruptures_true]
tuple_pred = [loading_pred,ruptures_pred]
tuple_bins = [bins_load,bins_rupture]
coeff_2d = bhattacharyya_probability_coefficient_dd(tuple_true,tuple_pred,
tuple_bins)
coeffs = [coeff_load,coeff_force,coeff_2d]
return coeffs
def bhattacharyya_probability_coefficient_1d(v1,v2,bins):
"""
# return the bhattacharyya distance between two 1-d arras
Args:
v<1/2>: see bhattacharyya_probability_coefficient_dd, except 1-D
bins: how to bin them, see
Returns:
bhattacharyya distance, see bhattacharyya_probability_coefficient
"""
return bhattacharyya_probability_coefficient_dd(v1,v2,[bins])
def bhattacharyya_probability_coefficient_dd(v1,v2,bins,normed=False):
"""
# return the bhattacharyya distance between arbitrary-dimensional
#probabilities, see bhattacharyya_probability_coefficient
Args:
v<1/2>: two arbitrary-dimensional lists to compare
bins: how to bin them
Returns:
bhattacharyya distance, see bhattacharyya_probability_coefficient
"""
histogram_kwargs = dict(bins=bins,weights=None,normed=normed)
v1_hist,v1_edges = np.histogramdd(sample=v1,**histogram_kwargs)
v2_hist,v2_edges = np.histogramdd(sample=v2,**histogram_kwargs)
return bhattacharyya_probability_coefficient(v1_hist,v2_hist)
def div0(a,b,replace_div_0=0):
"""
divide a by b, replacing any diviede by zero with repalace_div_0
Args:
a: numerator
b: denom
replace_div_0: what to replace the value with if we divide by zero
"""
with np.errstate(divide='ignore', invalid='ignore'):
c = np.true_divide( a, b )
c[~np.isfinite( c )] = replace_div_0 # -inf inf NaN
return c
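# Illustrative sketch (not part of the original module): div0 replaces the
# inf/NaN entries produced by zero denominators with the default value 0.
def _example_div0():
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([2.0, 0.0, 0.0])
    assert np.allclose(div0(a, b), [0.5, 0.0, 0.0])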
def bhattacharyya_probability_coefficient(v1_hist,v2_hist):
"""
# return the bhattacharyya distance between the probabilities, see:
# https://en.wikipedia.org/wiki/Bhattacharyya_distance
Args:
v<1/2>_hist: values of two ditributions in each bins
Returns:
bhattacharyya distance
"""
v1_hist = v1_hist.flatten()
v2_hist = v2_hist.flatten()
# if we divide by zero, then one of the probabilities was all zero -- ignore
p1 = v1_hist/sum(v1_hist)
p2 = v2_hist/sum(v2_hist)
prod = p1 * p2
return sum(np.sqrt(prod))
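# Illustrative sketch (not part of the original module): the Bhattacharyya
# coefficient is 1 for identical histograms and 0 for disjoint ones.
def _example_bhattacharyya():
    h = np.array([1.0, 2.0, 3.0])
    identical = bhattacharyya_probability_coefficient(h, h)
    disjoint = bhattacharyya_probability_coefficient(np.array([1.0, 0.0]),
                                                     np.array([0.0, 1.0]))
    assert np.isclose(identical, 1.0) and np.isclose(disjoint, 0.0)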
def stdevs_epsilon_sigma(y,interpolated_y,n_points):
    # get a model for the local standard deviation
diff = y-interpolated_y
stdevs = local_stdev(diff,n_points)
sigma = np.std(stdevs)
epsilon = np.median(stdevs)
return stdevs,epsilon,sigma
def _surface_index(filtered_y,y,last_less_than=True):
"""
Get the surface index
Args:
y: the y we are searching for the surface of (raw)
filtered_y: the filtered y value
        n_smooth: number of points to use for smoothing
last_less_than: if true (default, 'raw' data), then we find the last
time we are less than the baseline in obj.Force. Otherwise, the first
time we are *greater* than...
Returns
the surface index and baseline in force
"""
median = np.median(y)
lt = np.where(y < median)[0]
# determine the last time we were less than the median;
# use this as a marker between the invols and the surface region
last_lt = lt[-1]
x = np.arange(start=0,stop=y.size,step=1)
x_approach = x[:last_lt]
x_invols = x[last_lt:]
coeffs_approach = np.polyfit(x=x_approach,y=y[:last_lt],deg=1)
coeffs_invols = np.polyfit(x=x_invols,y=y[last_lt:],deg=1)
pred_approach = np.polyval(coeffs_approach,x=x)
pred_invols = np.polyval(coeffs_invols,x=x)
surface_idx = np.argmin(np.abs(pred_approach-pred_invols))
# iterate to get the final surface touchoff ; where is the filtered
# version less than the line?
where_touch = np.where( (x <= surface_idx) &
(filtered_y <= pred_approach))[0]
if (where_touch.size > 0):
surface_idx = where_touch[-1]
# the final baseline is just the value of the approach
median = pred_approach[surface_idx]
return median,surface_idx
def get_surface_index(obj,n_smooth,last_less_than=True):
"""
Get the surface index
Args:
see _surface_index
Returns
see _surface_index, except last (extra) tuple element is filtered
obj
"""
filtered_obj = filter_fec(obj,n_smooth)
baseline,idx = _surface_index(filtered_obj.Force,obj.Force,
last_less_than=last_less_than)
return baseline,idx,filtered_obj
def zero_by_approach(split_fec,n_smooth,flip_force=True):
"""
zeros out (raw) data, using n_smooth points to do so
Args:
split_fec: instead of split_force_extension
n_smooth: number of points for smoothing
flip_force: if true, multiplies the zeroed force by -1
Returns:
nothing, but modifies split_fec to be zerod appropriately.
"""
# PRE: assume the approach is <50% artifact and invols
approach = split_fec.approach
force_baseline,idx_surface,filtered_obj = \
get_surface_index(approach,n_smooth,last_less_than=True)
idx_delta = approach.Force.size-idx_surface
# get the separation at the baseline
separation_baseline = filtered_obj.Separation[idx_surface]
zsnsr_baseline = filtered_obj.Zsnsr[idx_surface]
"""
plt.subplot(2,1,1)
plt.plot(approach.Force,alpha=0.3)
plt.plot(filtered_obj.Force)
plt.axvline(idx_surface)
plt.subplot(2,1,2)
plt.plot(split_fec.retract.Force)
plt.show()
"""
# zero everything
split_fec.zero_all(separation_baseline,zsnsr_baseline,force_baseline,
force_baseline)
if (flip_force):
split_fec.flip_forces()
def slice_func_fec(fec,slice_v):
"""
makes a copy of the fec, slicing the data fields to slice_v
Args:
fec: the force-extension curve to use
slice_v: the slice of fec to get
Returns:
the sliced version of fec
"""
to_ret = copy.deepcopy(fec)
slice_f = lambda x: x[slice_v]
to_ret.Force = slice_f(to_ret.Force)
to_ret.Separation = slice_f(to_ret.Separation)
to_ret.ZSnsr = slice_f(to_ret.ZSnsr)
to_ret.Time = slice_f(to_ret.Time)
return to_ret
def split_FEC_by_meta(time_sep_force_obj):
"""
given a time_sep_force object, splits it into approach, retract, and dwell
by the meta information
Args:
time_sep_force_obj: whatever object to split, should have triggertime
and dwelltime
Returns:
        a split_force_extension object containing the approach, dwell, and
        retract slices
"""
start_of_dwell_time = time_sep_force_obj.TriggerTime
end_of_dwell_time = start_of_dwell_time + \
time_sep_force_obj.SurfaceDwellTime
get_idx_at_time = lambda t: np.argmin(np.abs(time_sep_force_obj.Time-t))
start_of_dwell = get_idx_at_time(start_of_dwell_time)
end_of_dwell = get_idx_at_time(end_of_dwell_time)
# slice the object into approach, retract, dwell
slice_func = lambda s: slice_func_fec(time_sep_force_obj,s)
approach = slice_func(slice(0 ,start_of_dwell,1))
dwell = slice_func(slice(start_of_dwell,end_of_dwell ,1))
retract = slice_func(slice(end_of_dwell ,None ,1))
"""
plt.plot(approach.Time,approach.Force)
plt.plot(dwell.Time,dwell.Force)
plt.plot(retract.Time,retract.Force)
print(start_of_dwell,end_of_dwell)
plt.show()
"""
return split_force_extension(approach,dwell,retract)
def spline_residual_mean_and_stdev(f,f_interp,start_q=1):
"""
returns the mean and standard deviation associated with f-f_interp,
    from start_q% to (100-start_q)%
Args:
f: the 'noisy' function
f_interp: the interpolated f (splined)
        start_q: the start percentile; we have to ignore huge outliers
Returns:
tuple of mean,standard deviation
"""
    # symmetrically choose percentiles for the fit
f_minus_mu = f-f_interp
qr_1,qr_2 = np.percentile(a=f_minus_mu,q=[start_q,100-start_q])
idx_fit = np.where( (f_minus_mu >= qr_1) &
(f_minus_mu <= qr_2))
# fit a normal distribution to it, to get the standard deviation (globally)
mu,std = norm.fit(f_minus_mu[idx_fit])
return mu,std
def spline_gaussian_cdf(f,f_interp,std):
"""
returns the CDF associated with the random variable with mean given by
f_interp and standard deviation associated with std, assuming gaussian
    about f - f_interp
Args:
f: see spline_residual_mean_and_stdev
f_interp: see spline_residual_mean_and_stdev
std: standard deviation
Returns:
        cumulative distribution
"""
# get the distribution of the actual data
distribution_force = norm(loc=f_interp, scale=std)
# get the cdf of the data
return distribution_force.cdf(f)
def spline_interpolated_by_index(f,nSmooth,**kwargs):
"""
    returns a spline interpolator of f versus 0,1,2,...,(N-1)
Args:
f: function to interpolate
nSmooth: distance between knots (smoothing number)
**kwargs: passed to spline_interpolator
Returns:
spline interpolated value of f on the indices (*not* an interpolator
object, just an array)
"""
x,interp = spline_interpolator_by_index(f,nSmooth,**kwargs)
return interp(x)
def spline_interpolator_by_index(f,n_smooth,**kwargs):
"""
see spline_interpolated_by_index. except returns tuple of <x,interpolator
object>
Args:
see spline_interpolated_by_index
Returns:
see spline_interpolated_by_index
"""
x = np.arange(start=0,stop=f.size,step=1)
return x,spline_interpolator(n_smooth,x,f,**kwargs)
def spline_interpolator(tau_x,x,f,knots=None,deg=2):
"""
returns a spline interpolator with knots uniformly spaced at tau_x over x
Args:
        tau_x: the knot step size, in the same units as x
x: the unit of 'time'
f: the function we want the autocorrelation of
knots: the locations of the knots (default to uniform in x)
deg: the degree of the spline interpolator to use. continuous to
deg-1 derivative
Returns:
scipy.interpolate.LSQUnivariateSpline object, interpolating f on x
"""
    # note: stop is *not* included in the interval, so we add an extra step
# to make it included
if (knots is None):
step_knots = tau_x/2
knots = np.arange(start=min(x),stop=max(x)+step_knots,
step=step_knots)
# get the spline of the data
spline_args = \
dict(
# degree is k, (k-1)th derivative is continuous
k=deg,
# specify the spline knots (t) uniformly in time at the
            # autocorrelation time. don't want the endpoints
t=knots[1:-1]
)
return interpolate.LSQUnivariateSpline(x=x,y=f,**spline_args)
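# Illustrative sketch (not part of the original module): fit a smoothing
# spline with knots spaced every tau_x/2 and evaluate it on the original grid.
# The noisy sine data below are made up.
def _example_spline_interpolator():
    x = np.linspace(0, 1, 500)
    f = np.sin(2 * np.pi * x) + 0.05 * np.random.RandomState(0).randn(x.size)
    interp = spline_interpolator(tau_x=0.1, x=x, f=f)
    f_smooth = interp(x)
    assert f_smooth.shape == f.shape
    return f_smooth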
def auto_correlation_helper(auto):
# normalize the auto correlation, add in a small bias to avoid
# taking the log of 0. data is normalized to 0->1, so it should be OK
tol = 1e-9
# auto norm goes from 0 to 1
auto_norm = (auto - np.min(auto))/(np.max(auto)-np.min(auto))
auto_median_normed = auto_norm - np.median(auto_norm)
# statistical norm goes from -1 to 1
statistical_norm = (auto_norm - 0.5) * 2
log_norm = np.log(auto_norm + tol)
fit_idx_max = np.where(auto_median_normed < 0.25)[0]
    assert fit_idx_max.size > 0 , "autocorrelation doesn't dip under threshold"
# get the first time we cross under the threshold
fit_idx_max = fit_idx_max[0]
return auto_norm,statistical_norm,log_norm,fit_idx_max
def auto_correlation_tau(x,f_user,deg_autocorrelation=1,
autocorrelation_skip_points=None,fit_idx_max=None):
"""
    get the autocorrelation time of f (ie: fit a polynomial to log(autocorrelation)
vs x, so the tau is more or less the exponential decay constant
Args:
x: the unit of 'time'
f_user: the function we want the autocorrelation of
deg_autocorrelation: the degree of autocorrelation to use. defaults to
linear, to get the 1/e time of autocorrelation
fit_idx_max: maximum index to fit. defaults to until we hit 0 in
the statistical autocorrelation
Returns:
tuple of <autocorrelation tau, coefficients of log(auto) vs x fit,
auto correlation ('raw')>
"""
f = f_user - np.mean(f_user)
auto = np.correlate(f,f,mode='full')
# only want the last half (should be identical?)
size = int(auto.size/2)
auto = auto[size:]
if (autocorrelation_skip_points is not None):
auto = auto[autocorrelation_skip_points:]
auto_norm,statistical_norm,log_norm,fit_idx_max_tmp = \
auto_correlation_helper(auto)
if fit_idx_max is None:
fit_idx_max = fit_idx_max_tmp
    # fit a polynomial to the autocorrelation spectrum, get the 1/e
# time.
coeffs = np.polyfit(x=x[:fit_idx_max],y=log_norm[:fit_idx_max],
deg=deg_autocorrelation)
# get just the linear and offset
linear_auto_coeffs = coeffs[-2:]
# get tau (coefficient in the exponent, y=A*exp(B*t), so tau=1/B
# take the absolute value, since tau is a decay, has a minus
tau = abs(1/linear_auto_coeffs[0])
return tau,coeffs,auto
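# Illustrative sketch (not part of the original module): typical call pattern
# on a decaying signal; the recovered tau is only roughly the 1/e decay time.
def _example_auto_correlation_tau():
    x = np.linspace(0, 5, 2000)
    f = np.exp(-x / 0.5)
    tau, coeffs, auto = auto_correlation_tau(x, f)
    return tau, coeffs, auto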
def zero_and_split_force_extension_curve(example,fraction=0.02):
"""
zeros a force extension curve by its meta information and the touchoff
on the approach
Args:
example: 'raw' force extension to use (negative force is away
from surface on molecule)
fraction: the portion of the curve to use for smoothing
returns:
example as an Analysis.split_force_extension object
"""
example_split = split_FEC_by_meta(example)
approach = example_split.approach
retract = example_split.retract
f = approach.Force
x = approach.Time
n_approach = f.size
n_retract = retract.Force.size
num_points_approach = int(np.ceil(n_approach * fraction))
num_points_retract = int(np.ceil(n_retract * fraction))
# zero out everything to the approach using the autocorrelation time
zero_by_approach(example_split,num_points_approach)
example_split.set_tau_num_points(num_points_retract)
example_split.set_tau_num_points_approach(num_points_approach)
return example_split
def _loading_rate_helper(x,y):
if (x.size < 2):
raise IndexError("Can't fit a line to something with <2 points")
coeffs = np.polyfit(y=y,x=x,deg=1)
predicted = np.polyval(coeffs,x=x)
loading_rate, _ = coeffs
# determine the last time the *data* is above the prediction
where_above = np.where(y > predicted)[0]
if (where_above.size == 0):
# unlikely but worth checking
last_idx_above = np.argmax(y)
else:
last_idx_above = where_above[-1]
# determine what we *predict* to be the value at that point
rupture_force = predicted[last_idx_above]
return coeffs,predicted,loading_rate,rupture_force,last_idx_above
def loading_rate_rupture_force_and_index(time,force,slice_to_fit):
"""
given a portion of time and force to fit, the loading rate is determined
    by the local slope. The rupture force is determined by finding the last
    time the data exceeds the linear fit and evaluating the fit at that point
    (XXX should use two lines in case of flickering)
Args:
time/force: should be self-explanatory. Force should be zeroed.
slice_to_fit: where we are fitting
Returns:
tuple of <loading rate,rupture force,index_of_rupture>
"""
x = time[slice_to_fit]
y = force[slice_to_fit]
# XXX can fit a line, throw an error?
_,_,loading_rate,rupture_force,last_idx_above = \
_loading_rate_helper(x,y)
return loading_rate,rupture_force,last_idx_above
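# Illustrative sketch (not part of the original module): on a noiseless linear
# ramp the fitted loading rate equals the slope of the ramp.
def _example_loading_rate():
    time = np.linspace(0, 1, 100)
    force = 5.0 * time
    rate, rupture, idx = loading_rate_rupture_force_and_index(
        time, force, slice(0, None, 1))
    assert np.isclose(rate, 5.0)
    return rate, rupture, idx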
def get_before_and_after_and_zoom_of_slice(split_fec):
event_idx_retract = split_fec.get_retract_event_centers()
index_before = [0] + [e for e in event_idx_retract]
index_after = [e for e in event_idx_retract] + [None]
slices_before = [slice(i,f,1)
for i,f in zip(index_before[:-1],index_after[:-1])]
slices_after = [slice(i,f,1)
for i,f in zip(index_before[1:],index_after[1:])]
return slices_before,slices_after
| gpl-3.0 |
xavierwu/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with a sampling algorithm
        # that does not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
    # the lengths of the arrays in classes and class_probabilites do not match
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
matsur8/noisy_label | noisy_label.py | 1 | 5032 | # -*- coding:utf-8 -*-
from __future__ import print_function
import numpy as np
from sklearn.metrics import log_loss
def make_transition_matrix(gamma, n_classes=None):
d = np.array(gamma).ndim
if d == 0:
        # spread the off-diagonal mass evenly over the other (n_classes - 1) labels
        Gamma = (1 - gamma) / (n_classes - 1) * np.ones((n_classes, n_classes))
np.fill_diagonal(Gamma, gamma)
elif d == 1:
n_classes = gamma.shape[0]
Gamma = ((1 - gamma) / (n_classes - 1.0))[:,np.newaxis] * np.ones(n_classes)
np.fill_diagonal(Gamma, gamma)
else:
Gamma = gamma
return Gamma
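# (Editorial note.) Illustrative sketch of make_transition_matrix: for a scalar
# gamma=0.8 and n_classes=3 it builds
#
#     [[0.8, 0.1, 0.1],
#      [0.1, 0.8, 0.1],
#      [0.1, 0.1, 0.8]]
#
# i.e. the diagonal holds the probability of keeping the clean label and the
# remaining mass is split evenly over the other classes; a 1-D or 2-D gamma is
# used to fill the diagonal or the full matrix, respectively.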
class NoisyLabelModel(object):
def __init__(self, clean_model, n_classes, noise_model="symmetric", gamma=1.0):
"""
        noise_model: symmetric   The flipping probability Prob[y_clean -> y_noisy] is independent of y_clean and y_noisy.
                     uniform     The flipping probability Prob[y_clean -> y_noisy] depends on y_clean but is independent of y_noisy.
                     nonuniform  The flipping probability Prob[y_clean -> y_noisy] depends on both y_clean and y_noisy.
"""
self.clean_model = clean_model
self.n_classes = n_classes
self.noise_model = noise_model
self.Gamma = make_transition_matrix(gamma, n_classes)
def get_clean_model(self):
return self.clean_model
def predict_proba(self, X):
""" predict probability that a noisy label is 1"""
y_pred = self.clean_model.predict_proba(X)
return y_pred.dot(self.Gamma)
def infer_clean_label(self, X, y=None):
""" infer true labels from X and noisy labels y """
y_pred = self.clean_model.predict_proba(X)
if y is None:
return y_pred
prob_to_y = y.dot(self.Gamma.transpose())
joint_prob = y_pred * prob_to_y
return y_pred, joint_prob / joint_prob.sum(1)[:,np.newaxis]
def log_likelihood(self, X, y):
""" return average log likelihood per data point.
y: noisy labels
"""
y_pred = self.predict_proba(X)
return log_loss(y, y_pred)
def estimate_flip_proba(self, true_labels, noisy_labels):
contingency = true_labels.transpose().dot(noisy_labels)
if self.noise_model == "symmetric":
            g = np.diag(contingency).sum() / contingency.sum()
elif self.noise_model == "uniform":
g = np.diag(contingency) / contingency.sum(1)
else:
            # "nonuniform": keep the full row-normalized contingency table
            g = contingency / contingency.sum(1)[:, np.newaxis]
Gamma = make_transition_matrix(g, self.n_classes)
return Gamma
def fit(self, X, y, estimates_flip_proba=True, max_iter=50, init_model=True, init_flip_proba=True, arg_fit=None, verbose=False):
""" fit model to data with an EM algorithm.
The algorithm is proposed in [1], which considers binary classification and
uses multiple noisy labels for each data point.
(Initialization is not the same as that proposed in [1]. See below.)
y: noisy labels.
init_model: initialize parameters of self.clean_model by fitting it to noisy labels if init_model is True.
init_flip_proba: initialize flipping probabilities by using error probabilities of the initial clean model if init_flip_proba is True.
arg_fit: dictionary of arguments passed to self.clean_model.fit besides X and y.
[1] Rayker et al., "Learning From Crowds", 2010,
http://www.jmlr.org/papers/volume11/raykar10a/raykar10a.pdf
"""
n = y.shape[0]
n1 = np.sum(y)
n0 = n - n1
if arg_fit is None:
arg_fit = {}
#initialization
if init_model:
self.clean_model.fit(X, y, **arg_fit)
if init_flip_proba:
r = self.clean_model.predict_proba(X)
self.Gamma = self.estimate_flip_proba(r, y)
for i in range(max_iter):
#e-step
y_pred, y_pred_post = self.infer_clean_label(X, y)
if verbose:
y_pred_noisy = y_pred.dot(self.Gamma)
print("{} iterations: cross entropy {}".format(i, log_loss(y, y_pred_noisy)))
#m-step
self.clean_model.fit(X, y_pred_post, **arg_fit)
if estimates_flip_proba:
self.Gamma = self.estimate_flip_proba(y_pred_post, y)
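# (Editorial usage sketch, not part of the original module.) Assuming a base
# classifier whose fit() accepts soft / one-hot label matrices and which
# exposes predict_proba(), the intended flow is roughly:
#
#     base = SomeSoftLabelClassifier()                  # hypothetical estimator
#     model = NoisyLabelModel(base, n_classes=3, noise_model="uniform")
#     model.fit(X, Y_noisy)                             # Y_noisy: (n, 3) one-hot
#     Y_prior, Y_posterior = model.infer_clean_label(X, Y_noisy)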
def loss_unbiased(loss, y, y_pred, Gamma):
""" return an unbiased estimate of loss.
An unbiased estimator for binary classification is proposed in [1]. This is its generalization to multiclass problems.
loss: loss function (noisy_label, prediction) -> loss
y: noisy labels
    y_pred: predictions
Gamma: transition matrix (Gamma[i,j] = Prob[y_noisy=j | y_clean=i])
[1] Natarajan et al., "Learning with noisy labels", 2013,
https://papers.nips.cc/paper/5073-learning-with-noisy-labels.pdf
TODO: Test.
"""
L = np.zeros(y.shape)
for i in range(y.shape[1]):
yi = np.zeros(y.shape)
yi[:,i] = 1
L[:,i] = loss(yi, y_pred)
return (y.transpose().dot(L) * np.linalg.inv(Gamma)).sum()
| mit |
rohitgirdhar-cmu-experimental/fast-rcnn-with3D | tools/demo.py | 6 | 5445 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from utils.cython_nms import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
NETS = {'vgg16': ('VGG16',
'vgg16_fast_rcnn_iter_40000.caffemodel'),
'vgg_cnn_m_1024': ('VGG_CNN_M_1024',
'vgg_cnn_m_1024_fast_rcnn_iter_40000.caffemodel'),
'caffenet': ('CaffeNet',
'caffenet_fast_rcnn_iter_40000.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(net, image_name, classes):
"""Detect object classes in an image using pre-computed object proposals."""
    # Load pre-computed Selective Search object proposals
box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',
image_name + '_boxes.mat')
obj_proposals = sio.loadmat(box_file)['boxes']
# Load the demo image
im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im, obj_proposals)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
for cls in classes:
cls_ind = CLASSES.index(cls)
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
keep = np.where(cls_scores >= CONF_THRESH)[0]
cls_boxes = cls_boxes[keep, :]
cls_scores = cls_scores[keep]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
print 'All {} detections with p({} | box) >= {:.1f}'.format(cls, cls,
CONF_THRESH)
vis_detections(im, cls, dets, thresh=CONF_THRESH)
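# (Editorial note.) In the loop above, `dets` is an (N, 5) float32 array of
# [x1, y1, x2, y2, score] rows; nms() returns the indices of the boxes to keep
# after suppressing detections that overlap a higher-scoring box by more than
# NMS_THRESH.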
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
choices=NETS.keys(), default='vgg16')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
prototxt = os.path.join(cfg.ROOT_DIR, 'models', NETS[args.demo_net][0],
'test.prototxt')
caffemodel = os.path.join(cfg.ROOT_DIR, 'data', 'fast_rcnn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_fast_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/000004.jpg'
demo(net, '000004', ('car',))
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/001551.jpg'
demo(net, '001551', ('sofa', 'tvmonitor'))
plt.show()
| mit |
LohithBlaze/scikit-learn | examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
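# (Editorial note, not part of the original example.) The broadcasting above
# builds X[i, j] = 1 / (i + j + 1), i.e. the 10x10 Hilbert matrix, which is
# notoriously ill-conditioned -- hence the large coefficient oscillations for
# small alpha described in the module docstring.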
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
vatika/Automated-Essay-Grading | extract.py | 1 | 1771 | import csv
import nltk
import string
from collections import Counter
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from nltk.stem.porter import PorterStemmer
from sklearn.metrics.pairwise import linear_kernel
stemmer = PorterStemmer()
def stem_tokens(tokens, stemmer):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
def tokenize(f):
tokens = nltk.word_tokenize(f)
stems = stem_tokens(tokens, stemmer)
return stems
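# (Editorial note; exact stems depend on the NLTK Porter implementation.)
# Rough illustration of the tokenizer: tokenize("cats running") would yield
# stems such as ['cat', 'run'], so the tf-idf vocabulary below is stemmed too.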
c = []
with open('Data/training_3.csv', 'rb') as train:
reader = csv.reader(train)
all_words = []
essays = []
for row in reader:
essay = row[2].translate(None, string.punctuation).decode('utf-8')
tokens = tokenize(essay)
all_words.extend(tokens)
c.append(tokens)
essays.append(essay)
count = Counter(all_words)
#most common words in all essays
common_words = [x[0] for x in count.most_common(10)]
tfidf = TfidfVectorizer(tokenizer=tokenize, stop_words='english')
tfs = tfidf.fit_transform(essays)
feature_names = tfidf.get_feature_names()
v = []
for i in xrange(len(c)):
value = 0
c_e = Counter(c[i])
common_10_e = c_e.most_common(10)
common_words_e = [x[0] for x in c_e.most_common(10)]
for j in xrange(len(common_words_e)):
if common_words_e[j] in common_words:
for col in tfs.nonzero()[1]:
if feature_names[col] == common_words_e[j]:
value += common_10_e[j][1]*tfs[0,col]
break
v.append(value)
print v
#cosine_similarities = linear_kernel(tfidf[0:1], tfidf).flatten()
#related = cosine_similarities.argsort()[:-5:-1]
| gpl-2.0 |
collinj22/Indeed-Entry-level-Job-Search | Indeed_Job_Search_Spyre/server.py | 1 | 1509 | from spyre import server
import IndeedSearch
import pandas as pd
import openpyxl
import time
import os
import io
server.include_df_index = True
pd.set_option('display.max_colwidth', -1)
class IndeedJobSearch(server.App):
title = "Indeed Job Search"
inputs = [{"type": "text",
"key": "job",
"label": "Job Title",
"action_id": "update_data"},
{"type": "text",
"key": "location",
"label": "Location",
"action_id": "update_data"},
{"type": "text",
"key": "radius",
"label": "Radius",
"action_id": "update_data"}]
controls = [{"type": "button",
"label": "Search",
"id": "update_data"},
{"type": "button",
"label": "Download Excel File",
"id": "results_csv"}]
outputs = [{"type": "table",
"id": "table_id",
"control_id": "update_data",
"tab": "Table",
"on_page_load": False},
{'type': 'download',
'id': 'results_csv',
'on_page_load': False}]
def getData(self, params):
df = IndeedSearch.main(params['job'], params['location'], params['radius'])
return df.drop(['description'],axis=1)
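# (Editorial note.) Spyre delivers the values typed into the text inputs to
# getData via the `params` dict, keyed by the "key" fields declared in
# `inputs` above ('job', 'location', 'radius'), which is how they reach
# IndeedSearch.main here.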
if __name__ == '__main__':
app = IndeedJobSearch()
app.launch(host='0.0.0.0', port=int(os.environ.get('PORT', '5000')))
| mit |
asoliveira/NumShip | scripts/plot/r-zz-plt.py | 1 | 2856 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Is it dimensionless?
adi = False
# Should the figures be saved (True|False)?
save = True
# If saving, which file format should be used?
formato = 'jpg'
# If saving, to which directory should the figures be written?
dircg = 'fig-sen'
# If saving, what file name should be used?
nome = 'r-zz'
# Which title to put on the plots?
titulo = ''#'Curva de Giro'
# Which colors for the plots?
pc = 'k'
r1c = 'b'
r2c = 'y'
r3c = 'r'
# Line styles
ps = '-'
r1s = '-'
r2s = '-'
r3s = '-'
import os
import scipy as sp
import matplotlib.pyplot as plt
from libplot import *
poshis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/pos.dat')
poshis2 = sp.genfromtxt('../entrada/r/saida1.1/CurvaZigZag/pos.dat')
poshis3 = sp.genfromtxt('../entrada/r/saida1.2/CurvaZigZag/pos.dat')
poshis4 = sp.genfromtxt('../entrada/r/saida1.3/CurvaZigZag/pos.dat')
lemehis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/leme.dat')
lemehis2 = sp.genfromtxt('../entrada/r/saida1.1/CurvaZigZag/leme.dat')
lemehis3 = sp.genfromtxt('../entrada/r/saida1.2/CurvaZigZag/leme.dat')
lemehis4 = sp.genfromtxt('../entrada/r/saida1.3/CurvaZigZag/leme.dat')
axl = [00, 1000, -40, 40]
# Plotting the turning curve (Curva de Giro)
if adi:
ylabel = r'$\psi\prime$'
xposlabel = r'$t\prime$'
else:
ylabel = r'$\psi \quad graus$'
xposlabel = r'$t \quad segundos$'
plt.subplot2grid((1,4),(0,0), colspan=3)
# Default (padrão) case
plt.plot(poshis[:, 0], poshis[:, 6] * (180/sp.pi), color = pc, linestyle = ps,
linewidth = 1, label=ur'padrão')
plt.plot(lemehis[:, 0], lemehis[:, 1] * (180/sp.pi), color = pc, linestyle = "--",
linewidth = 1, label=ur'leme--padrão')
plt.plot(poshis2[:, 0], poshis2[:, 6] * (180/sp.pi), color = r1c,linestyle = r1s,
linewidth = 1, label=ur'1.1$r$')
plt.plot(lemehis2[:, 0], lemehis2[:, 1] * (180/sp.pi), color = r1c, linestyle = "--",
linewidth = 1, label=ur'leme--1.1$r$')
plt.plot(poshis3[:, 0], poshis3[:, 6] * (180/sp.pi), color = r2c, linestyle = r2s,
linewidth = 1, label = ur'1.2$r$')
plt.plot(lemehis3[:, 0], lemehis3[:, 1] * (180/sp.pi), color = r2c, linestyle = "--",
linewidth = 1, label=ur'leme--1.2$r$')
plt.plot(poshis4[:, 0], poshis4[:, 6] * (180/sp.pi), color = r3c, linestyle = r3s,
linewidth = 1, label= ur'1.3$r$')
plt.plot(lemehis4[:, 0], lemehis4[:, 1] * (180/sp.pi), color = r3c, linestyle = "--",
linewidth = 1, label=ur'leme--1.3$r$')
plt.title(titulo)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.ylabel(ylabel)
plt.xlabel(xposlabel)
plt.axis(axl)
plt.grid(True)
if save:
if not os.path.exists(dircg):
os.makedirs(dircg)
if os.path.exists(dircg + '/' + nome + '.' + formato):
os.remove(dircg + '/' + nome + '.' + formato)
plt.savefig(dircg + '/' + nome + '.' + formato , format=formato)
else:
plt.show()
| gpl-3.0 |
gfyoung/pandas | pandas/tests/arrays/boolean/test_astype.py | 8 | 1603 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
def test_astype():
# with missing values
arr = pd.array([True, False, None], dtype="boolean")
with pytest.raises(ValueError, match="cannot convert NA to integer"):
arr.astype("int64")
with pytest.raises(ValueError, match="cannot convert float NaN to"):
arr.astype("bool")
result = arr.astype("float64")
expected = np.array([1, 0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
result = arr.astype("str")
expected = np.array(["True", "False", "<NA>"], dtype="<U5")
tm.assert_numpy_array_equal(result, expected)
# no missing values
arr = pd.array([True, False, True], dtype="boolean")
result = arr.astype("int64")
expected = np.array([1, 0, 1], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = arr.astype("bool")
expected = np.array([True, False, True], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
def test_astype_to_boolean_array():
# astype to BooleanArray
arr = pd.array([True, False, None], dtype="boolean")
result = arr.astype("boolean")
tm.assert_extension_array_equal(result, arr)
result = arr.astype(pd.BooleanDtype())
tm.assert_extension_array_equal(result, arr)
def test_astype_to_integer_array():
# astype to IntegerArray
arr = pd.array([True, False, None], dtype="boolean")
result = arr.astype("Int64")
expected = pd.array([1, 0, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
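# (Editorial note, not part of the pandas test module.) The behaviour covered
# above, in short:
#
#     arr = pd.array([True, False, None], dtype="boolean")
#     arr.astype("Int64")    # IntegerArray: [1, 0, <NA>]
#     arr.astype("float64")  # ndarray: [1., 0., nan]
#     arr.astype("int64")    # raises ValueError (cannot convert NA to integer)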
| bsd-3-clause |
yl3dy/vmetf | task1.py | 1 | 5702 | #!/usr/bin/env python3
import sys
from numpy import empty, linspace, float64
from math import floor
import matplotlib.pyplot as plt
from matplotlib.animation import ArtistAnimation
from os import system
# Constants:
A = 0.5
X0 = 0.1
F1 = 0
F2 = 1
Q = 0.1
# size
L = 1
H = 0.002
# time
T = 1
TAU = 0.001
PLOT_NUM = 250 # number of iters to plot
GRID_X_SIZE = floor(L / H) + 1
GRID_T_SIZE = floor(T / TAU) + 1
X_VALUES = linspace(0, L, GRID_X_SIZE)
T_VALUES = linspace(0, T, GRID_T_SIZE)
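# (Editorial note.) With the constants above the Courant number is
# A * TAU / H = 0.5 * 0.001 / 0.002 = 0.25, safely below the CFL limit of 1
# required by the explicit upwind and Lax-Wendroff schemes used below.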
def plot_result(t, data1, data2, data3):
"""Output results in image files."""
plt.axis([0., L, -0.1, F2*1.1])
plt.plot(X_VALUES, data1, X_VALUES, data2, X_VALUES, data3)
plt.savefig('task1-out/{}.png'.format(t))
plt.clf()
class AnimatedPlot:
"""For animated result output using matplotlib.animate."""
sequence = []
fig = plt.figure()
def __init__(self):
plt.axis([0., L*0.5, -0.1, 1.1])
def add_frame(self, t, data1, data2, data3=None):
        if data3 is not None:
self.sequence.append(plt.plot(X_VALUES, data1, 'r',
X_VALUES, data2, 'b',
X_VALUES, data3, 'g'))
else:
self.sequence.append(plt.plot(X_VALUES, data1, 'r',
X_VALUES, data2, 'b'))
def finalize(self):
animated_seq = ArtistAnimation(self.fig, self.sequence,
interval=100, blit=True)
plt.show()
def exact_solution(t):
"""Analytical solution at some t."""
f_grid = empty([GRID_X_SIZE])
x_n = int((X0 + A*t) / H)
f_grid[:x_n], f_grid[x_n:] = F2, F1
return f_grid
def smoothing(f_next):
f_tmp = f_next.copy()
for i in range(2, GRID_X_SIZE-2):
D_m = f_next[i] - f_next[i-1]
D_mm = f_next[i-1] - f_next[i-2]
D_p = f_next[i+1] - f_next[i]
D_pp = f_next[i+2] - f_next[i+1]
if D_p*D_m <= 0 or D_p*D_pp <= 0:
Q_plus = D_p
else:
Q_plus = 0
if D_p*D_m <= 0 or D_m*D_mm <= 0:
Q_minus = D_m
else:
Q_minus = 0
f_tmp[i] = f_next[i] + Q * (Q_plus - Q_minus)
return f_tmp
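# (Editorial note.) The smoothing step above acts as a simple flux-corrected
# filter: wherever consecutive differences change sign (spurious wiggles behind
# the discontinuity, typical of Lax-Wendroff), a fraction Q of the local
# difference is added back to damp the oscillation.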
def linear_methods():
"""Solution of linear equation using simple and Lax-Wendroff methods."""
anim = AnimatedPlot()
f_simple, f_simple_old = empty([GRID_X_SIZE]), empty([GRID_X_SIZE])
f_lw, f_lw_old = empty([GRID_X_SIZE]), empty([GRID_X_SIZE])
f_simple[0], f_lw[0] = F2, F2
f_simple_old, f_lw_old = exact_solution(0.), exact_solution(0.)
for n in range(GRID_T_SIZE):
f_exact = exact_solution(T_VALUES[n])
if n == 0:
anim.add_frame(n, f_exact, f_exact, f_exact)
continue
f_plus, f_minus = F2, F2
for i in range(1, GRID_X_SIZE):
# Simple 1st order
f_simple[i] = f_simple_old[i] - (A*TAU / H) * (f_simple_old[i] -
f_simple_old[i-1])
# Lax-Wendroff
if i < (GRID_X_SIZE-1):
f_plus = 0.5 * (f_lw_old[i] + f_lw_old[i+1]) - \
(A*TAU)/(2*H) * (f_lw_old[i+1] - f_lw_old[i])
f_lw[i] = f_lw_old[i] - (A*TAU/H) * (f_plus - f_minus)
f_plus, f_minus = f_minus, f_plus
f_simple, f_simple_old = f_simple_old, f_simple
f_lw[-1] = f_lw[-2]
f_lw = smoothing(f_lw)
f_lw, f_lw_old = f_lw_old, f_lw
# do not plot too often
if n % int(floor(GRID_T_SIZE / PLOT_NUM)) == 0:
anim.add_frame(n, f_exact, f_simple, f_lw)
anim.finalize()
def nonlinear_methods():
"""Solution of nonlinear equation (same methods as above)."""
anim = AnimatedPlot()
f_simple, f_simple_old = empty([GRID_X_SIZE]), empty([GRID_X_SIZE])
f_lw, f_lw_old = empty([GRID_X_SIZE]), empty([GRID_X_SIZE])
f_simple[0], f_lw[0] = F2, F2
f_simple_old, f_lw_old = exact_solution(0.), exact_solution(0.)
for n in range(GRID_T_SIZE):
f_exact = exact_solution(T_VALUES[n])
if n == 0:
anim.add_frame(n, f_exact, f_exact, f_exact)
continue
f_plus, f_minus = F2, F2
for i in range(1, GRID_X_SIZE):
# Simple 1st order
f_simple[i] = f_simple_old[i] - 0.5 * (TAU / H) * (f_simple_old[i]**2 -
f_simple_old[i-1]**2)
# Lax-Wendroff
if i < (GRID_X_SIZE-1):
f_plus = 0.5 * (f_lw_old[i] + f_lw_old[i+1]) - \
(TAU)/(4*H) * (f_lw_old[i+1]**2 - f_lw_old[i]**2)
f_lw[i] = f_lw_old[i] - 0.5*(TAU/H) * (f_plus**2 - f_minus**2)
f_plus, f_minus = f_minus, f_plus
f_simple, f_simple_old = f_simple_old, f_simple
f_lw[-1] = f_lw[-2]
f_lw = smoothing(f_lw)
f_lw, f_lw_old = f_lw_old, f_lw
# do not plot too often
if n % int(floor(GRID_T_SIZE / PLOT_NUM)) == 0:
anim.add_frame(n, f_simple, f_lw)
anim.finalize()
def clean_output():
"""Clean image output."""
system('rm task1-out/* 2> /dev/null')
def main():
"""Entry point."""
if len(sys.argv) != 2 or int(sys.argv[1]) not in range(1,6):
print('Bad arguments. Should specify only method number (1-5)')
quit()
arg = sys.argv[1]
prompt = lambda desc: 'Using method {}: {}'.format(arg, desc)
clean_output()
if arg == '1':
print(prompt('linear equation'))
linear_methods()
elif arg == '2':
print(prompt('Burgers equation'))
nonlinear_methods()
if __name__ == '__main__':
main()
# vim:set tw=0:
| mit |
mmaelicke/scikit-gstat | skgstat/data/__init__.py | 1 | 8650 | import pandas as pd
from skgstat.data import _loader
from skgstat.data._loader import field_names
# define all names
names = field_names()
origins = dict(
pancake="""Image of a pancake with apparent spatial structure.
Copyright Mirko Mälicke, 2020. If you use this data,
cite SciKit-GStat: https://doi.org/10.5281/zenodo.1345584
""",
aniso="""Random field greyscale image with geometric anisotropy.
The anisotropy in North-East direction has a factor of 3. The random
field was generated using gstools.
Copyright Mirko Mälicke, 2020. If you use this data,
cite SciKit-GStat: https://doi.org/10.5281/zenodo.1345584
""",
meuse=""""Sample dataset of real measurements of heavy metal pollutions
in the topsoil on a 15x15 meter plot along the river Meuse.
The data is distributed along with the R-package sp.
IMPORTANT: If you use this data, cite Pebesma and Bivand (2005)
and Bivand et al (2013):
Pebesma EJ, Bivand RS (2005). “Classes and methods for spatial
data in R.” R News, 5(2), 9–13. https://CRAN.R-project.org/doc/Rnews/.
Bivand RS, Pebesma E, Gomez-Rubio V (2013). Applied spatial data
analysis with R, Second edition. Springer, NY. https://asdar-book.org/.
"""
)
# define the functions
def pancake(N=500, band=0, seed=42):
"""
Sample of the :func:`pancake_field <skgstat.data.pancake_field>`.
By default, the Red band is sampled at 500 random
location without replacement.
Parameters
----------
N : int
Number of sample points to use.
band : int
can be 0 (Red), 1 (Green), 2 (Blue) or ``'mean'``, which
will average all three RGB bands
seed : int
By default a seed is set to always return the same
sample for same N and band input
Returns
-------
result : dict
        Dictionary of the sample and citation information.
The sample is a tuple of two numpy arrays.
See Also
--------
:func:`get_sample <skgstat.data._loader.get_sample>`
:func:`pancake_field <skgstat.data.pancake_field>`
Notes
-----
The image originates from a photograph of an actual pancake.
The image was cropped to an 500x500 pixel extent keeping the
center of the original photograph.
If you use this example somewhere else, please cite
SciKit-GStat [501]_, as it is distributed with the library.
References
----------
.. [501] Mirko Mälicke, Helge David Schneider, Sebastian Müller,
& Egil Möller. (2021, April 20). mmaelicke/scikit-gstat: A scipy
flavoured geostatistical variogram analysis toolbox (Version v0.5.0).
Zenodo. http://doi.org/10.5281/zenodo.4704356
"""
sample = _loader.get_sample('pancake', N=N, seed=seed, band=band)
return dict(
sample=sample,
origin=origins.get('pancake')
)
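# (Editorial usage sketch, not part of the original module.) Based on the
# docstring above, the returned dict can be consumed roughly like:
#
#     data = pancake(N=300)
#     coords, values = data['sample']   # two numpy arrays, per the docstring
#     print(data['origin'])             # citation / origin information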
def pancake_field(band=0):
"""
Image of a pancake with apparent spatial structure.
    The pancake image has three RGB bands.
Parameters
----------
band : int
can be 0 (Red), 1 (Green), 2 (Blue) or ``'mean'``, which
will average all three RGB bands
Returns
-------
result : dict
        Dictionary of the sample and citation information.
        The sample is a 2D numpy array of the field.
See Also
--------
skgstat.data._loader.field
skgstat.data.pancake
Notes
-----
The image originates from a photograph of an actual pancake.
The image was cropped to an 500x500 pixel extent keeping the
center of the original photograph.
If you use this example somewhere else, please cite
SciKit-GStat [501]_, as it is distributed with the library.
References
----------
.. [501] Mirko Mälicke, Helge David Schneider, Sebastian Müller,
& Egil Möller. (2021, April 20). mmaelicke/scikit-gstat: A scipy
flavoured geostatistical variogram analysis toolbox (Version v0.5.0).
Zenodo. http://doi.org/10.5281/zenodo.4704356
"""
sample = _loader.field('pancake', band)
return dict(
sample=sample,
origin=origins.get('pancake')
)
def aniso(N=500, seed=42):
"""
Sample of the :func:`ansio_field <skgstat.data.aniso_field>`.
By default the greyscale image is sampled
at 500 random locations.
Parameters
----------
N : int
Number of sample points to use.
seed : int
By default a seed is set to always return the same
sample for same N and band input
Returns
-------
result : dict
        Dictionary of the sample and citation information.
The sample is a tuple of two numpy arrays.
See Also
--------
skgstat.data._loader.field : field loader
aniso_field : Return the full field
Notes
-----
This image was created using :any:`gstools.SRF`.
The spatial random field was created using a Gaussian model
and has a size of 500x500 pixel. The created field
was normalized and rescaled to the value range of a
:any:`uint8 <numpy.uint8>`.
The spatial model includes a small nugget (~ 1/25 of the value range).
If you use this example somewhere else, please cite
SciKit-GStat [501]_, as it is distributed with the library.
References
----------
.. [501] Mirko Mälicke, Helge David Schneider, Sebastian Müller,
& Egil Möller. (2021, April 20). mmaelicke/scikit-gstat: A scipy
flavoured geostatistical variogram analysis toolbox (Version v0.5.0).
Zenodo. http://doi.org/10.5281/zenodo.4704356
"""
sample = _loader.get_sample('aniso', N=N, seed=seed)
return dict(
sample=sample,
origin=origins.get('aniso')
)
def aniso_field():
"""
    A greyscale image with geometric anisotropy.
    The anisotropy has a North-East orientation and an
    approx. 3 times larger correlation length than in the
    perpendicular orientation.
Returns
-------
result : dict
        Dictionary of the sample and citation information.
        The sample is a numpy array representing the image.
See Also
--------
skgstat.data._loader.field : field loader
aniso : Return a sample
Notes
-----
This image was created using :any:`gstools.SRF`.
The spatial random field was created using a Gaussian model
and has a size of 500x500 pixel. The created field
was normalized and rescaled to the value range of a
:any:`uint8 <numpy.uint8>`.
The spatial model includes a small nugget (~ 1/25 of the value range).
If you use this example somewhere else, please cite
SciKit-GStat [501]_, as it is distributed with the library.
References
----------
.. [501] Mirko Mälicke, Helge David Schneider, Sebastian Müller,
& Egil Möller. (2021, April 20). mmaelicke/scikit-gstat: A scipy
flavoured geostatistical variogram analysis toolbox (Version v0.5.0).
Zenodo. http://doi.org/10.5281/zenodo.4704356
"""
sample = _loader.field('aniso')
return dict(
sample=sample,
origin=origins.get('aniso')
)
def meuse(variable='lead'):
"""
Returns one of the samples of the well-known Meuse dataset.
    You can specify which heavy metal data you want to load.
    Parameters
    ----------
    variable : str
        Which heavy metal variable to load. Has to be one of
        'cadmium', 'copper', 'lead' or 'zinc'. Defaults to 'lead'.
Returns
-------
result : dict
        Dictionary of the sample and citation information.
Notes
-----
The example data was taken from the R package 'sp'
as published on CRAN: https://cran.r-project.org/package=sp
The package is licensed under GPL-3, which applies
to the sample if used somewhere else.
If you use this sample, please cite the original sources
[502]_, [503]_ and not SciKit-GStat.
References
----------
.. [502] Pebesma EJ, Bivand RS (2005). “Classes and methods for spatial
data in R.” R News, 5(2), 9–13. https://CRAN.R-project.org/doc/Rnews/.
.. [503] Bivand RS, Pebesma E, Gomez-Rubio V (2013). Applied spatial data
analysis with R, Second edition. Springer, NY. https://asdar-book.org/.
"""
# check variable
if variable not in ('cadmium', 'copper', 'lead', 'zinc'):
raise AttributeError(
"variable has to be in ['cadmium', 'copper', 'lead', 'zinc']"
)
# get the data
df = _loader.read_sample_file('meuse.txt')
# get the coordinates
coords = df[['x', 'y']].values
# get the correct variable
values = df[[variable]].values
# return
return dict(
sample=(coords, values, ),
origin=origins.get('meuse')
)
| mit |
JRock007/boxxy | dist/Boxxy.app/Contents/Resources/lib/python2.7/numpy/lib/function_base.py | 30 | 124613 | from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from ._compiled_base import _insert, add_docstring
from ._compiled_base import digitize, bincount, interp as compiled_interp
from ._compiled_base import add_newdoc_ufunc
from numpy.compat import long
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
block = 65536
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
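# (Editorial note, not part of the NumPy source.) The block-wise searchsorted
# above counts, for each bin edge, how many sorted samples lie to its left;
# np.diff of those cumulative counts gives the per-bin counts. For the
# docstring example a = [1, 2, 1], bins = [0, 1, 2, 3]:
#     edge counts -> [0, 0, 2, 3]   and   np.diff -> [0, 2, 1]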
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : array_like (N,), optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal to the dimension of the '
' sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : {array_type, double}
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.array(weights, dtype=a.dtype, copy=0)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis)
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
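# (Editorial note, not part of the NumPy source.) Worked arithmetic for the
# weighted example in the docstring above:
#     average(range(1, 11), weights=range(10, 0, -1))
#         = (1*10 + 2*9 + ... + 10*1) / (10 + 9 + ... + 1) = 220 / 55 = 4.0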
def asarray_chkfinite(a, dtype=None, order=None):
"""
Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
asarray : Create and array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
condlist = np.vstack([condlist, ~totlist])
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
    # as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:ndarray.copy are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
one-sides (forward or backwards) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
Returns
-------
gradient : ndarray
N arrays of the same shape as `f` giving the derivative of `f` with
respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
    # just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for axis in range(N):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
# Use first order differences for time data
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
        # Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
# 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
out /= dx[axis]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
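# Illustrative usage sketch (not part of the NumPy module itself): gradient()
# returns one array per axis, and scalar spacings passed as varargs scale the
# differences along the corresponding axis. The helper name `_demo_gradient`
# is hypothetical.
def _demo_gradient():
    import numpy as np
    z = np.array([[1., 2., 6.], [3., 4., 5.]])
    # One spacing per axis: axis 0 (rows) uses 2.0, axis 1 (columns) uses 1.0.
    gy, gx = np.gradient(z, 2.0, 1.0)
    # gy holds d(z)/d(row) estimates, gx holds d(z)/d(column) estimates,
    # both with the same shape as z.
    return gy, gx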
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The `n` order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
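# Illustrative usage sketch (not part of the NumPy module itself): diff()
# applied twice matches a single call with n=2, and the output shrinks by `n`
# along the chosen axis. The helper name `_demo_diff` is hypothetical.
def _demo_diff():
    import numpy as np
    x = np.array([1, 2, 4, 7, 0])
    first = np.diff(x)            # array([ 1,  2,  3, -7])
    second = np.diff(x, n=2)      # same as np.diff(first)
    assert np.array_equal(second, np.diff(first))
    return first, second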
def interp(x, xp, fp, left=None, right=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
Returns
-------
y : {float, ndarray}
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
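# Illustrative usage sketch (not part of the NumPy module itself): because
# interp() does not validate that `xp` is increasing, a cheap sanity check
# before calling it can catch nonsense results early. The helper name
# `_demo_interp` is hypothetical.
def _demo_interp():
    import numpy as np
    xp = [1, 2, 3]
    fp = [3, 2, 0]
    if not np.all(np.diff(xp) > 0):
        raise ValueError("xp must be strictly increasing")
    # Values outside [xp[0], xp[-1]] clamp to fp[0]/fp[-1] unless left/right
    # are given explicitly.
    return np.interp([0.0, 2.5, 4.0], xp, fp, right=-99.0)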
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : {ndarray, scalar}
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
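# Illustrative usage sketch (not part of the NumPy module itself): unwrap()
# removes artificial 2*pi jumps from a phase signal, so wrapping a continuous
# ramp with np.mod and then unwrapping recovers it. The helper name
# `_demo_unwrap` is hypothetical.
def _demo_unwrap():
    import numpy as np
    phase = np.linspace(0, 6 * np.pi, 50)          # continuous ramp
    wrapped = np.mod(phase, 2 * np.pi)             # jumps at multiples of 2*pi
    recovered = np.unwrap(wrapped)                 # jumps removed again
    assert np.allclose(recovered, phase)
    return recovered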
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
        Boolean mask array. Must have the same size as `arr`.
vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
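# Illustrative usage sketch (not part of the NumPy module itself): extract()
# pulls out the masked values and place() writes values back cyclically, so
# the two are near-inverses of each other. The helper name
# `_demo_extract_place` is hypothetical.
def _demo_extract_place():
    import numpy as np
    arr = np.arange(12).reshape(3, 4)
    mask = arr % 3 == 0
    picked = np.extract(mask, arr)        # array([0, 3, 6, 9])
    # place() consumes `vals` in order, repeating it when it is shorter than
    # the number of True entries in the mask.
    np.place(arr, mask, [-1, -2])
    return picked, arr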
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
    Support for the new keyword-argument interface and the `excluded`
    argument further degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
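# Illustrative usage sketch (not part of the NumPy module itself): a
# vectorize instance with `otypes` fixed up front skips the extra call used
# to infer the output dtype, and `excluded` keeps a whole argument out of
# the broadcasting. The helper names `_demo_vectorize` and `clipped_ratio`
# are hypothetical and exist only for illustration.
def _demo_vectorize():
    import numpy as np
    def clipped_ratio(a, b, cap):
        return min(a / b, cap)
    # `cap` is passed through unchanged; `a` and `b` broadcast element-wise.
    vfunc = np.vectorize(clipped_ratio, otypes=[float], excluded={'cap'})
    return vfunc([1.0, 4.0, 9.0], [2.0, 2.0, 2.0], cap=3.0)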
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
N = X.shape[1]
axis = 0
else:
N = X.shape[0]
axis = 1
# check ddof
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
X = concatenate((X, y), axis)
X -= X.mean(axis=1-axis, keepdims=True)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
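# Illustrative usage sketch (not part of the NumPy module itself): with
# rowvar left at its default each row is treated as a variable, and `ddof`
# overrides the `bias` normalization. The helper name `_demo_cov` is
# hypothetical.
def _demo_cov():
    import numpy as np
    x = np.array([[0.0, 1.0, 2.0],
                  [2.0, 1.0, 0.0]])
    unbiased = np.cov(x)            # normalized by N - 1
    population = np.cov(x, ddof=0)  # normalized by N
    return unbiased, population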
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
        shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
c = cov(x, y, rowvar, bias, ddof)
try:
d = diag(c)
except ValueError: # scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
return c / sqrt(multiply.outer(d, d))
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means"removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
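# Illustrative usage sketch (not part of the NumPy module itself): the four
# cosine/triangular windows above share the same calling convention, so they
# can be compared side by side; for odd M each peaks at the centre sample.
# The helper name `_demo_windows` is hypothetical.
def _demo_windows(M=51):
    import numpy as np
    windows = {
        'blackman': np.blackman(M),
        'bartlett': np.bartlett(M),
        'hanning': np.hanning(M),
        'hamming': np.hamming(M),
    }
    # For odd M the centre sample is the maximum of every window.
    assert all(w[M // 2] == w.max() for w in windows.values())
    return windows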
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
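# Illustrative usage sketch (not part of the NumPy module itself): i0()
# switches between the two Chebyshev expansions at |x| = 8 (values up to 8
# use the first series) and is even in its argument, so i0(-x) == i0(x).
# The helper name `_demo_i0` is hypothetical.
def _demo_i0():
    import numpy as np
    small = np.i0([0.0, 1.0, 7.9])     # handled by the [0, 8] expansion
    large = np.i0([8.1, 20.0])         # handled by the (8, inf) expansion
    assert np.allclose(np.i0(-3.0), np.i0(3.0))
    return small, large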
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
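# Illustrative usage sketch (not part of the NumPy module itself): varying
# `beta` morphs the Kaiser window from a rectangular window (beta=0) towards
# increasingly narrow tapers. The helper name `_demo_kaiser` is hypothetical.
def _demo_kaiser(M=51):
    import numpy as np
    rectangular = np.kaiser(M, 0.0)        # all ones
    hamming_like = np.kaiser(M, 5.0)       # roughly Hamming-shaped
    blackman_like = np.kaiser(M, 8.6)      # roughly Blackman-shaped
    assert np.allclose(rectangular, np.ones(M))
    return hamming_like, blackman_like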
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
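# Illustrative usage sketch (not part of the NumPy module itself): sinc() is
# 1 at x == 0 (via the small-epsilon substitution used above) and 0 at every
# other integer, up to floating-point rounding. The helper name `_demo_sinc`
# is hypothetical.
def _demo_sinc():
    import numpy as np
    x = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])
    vals = np.sinc(x)
    assert np.allclose(vals, [0.0, 0.0, 1.0, 0.0, 0.0], atol=1e-12)
    return vals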
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
        Reduction function capable of receiving an axis argument.
        It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and buffer length as the expected output, but the
type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve the
contents of the input array. Treat the input as undefined, but it
will probably be fully or partially sorted. Default is False. Note
that, if `overwrite_input` is True and the input is not already an
ndarray, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in which
case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
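# Illustrative usage sketch (not part of the NumPy module itself): with
# keepdims=True the reduced axis is retained as size one, so the result
# broadcasts back against the input. The helper name `_demo_median` is
# hypothetical.
def _demo_median():
    import numpy as np
    a = np.array([[10.0, 7.0, 4.0],
                  [3.0, 2.0, 1.0]])
    med = np.median(a, axis=1, keepdims=True)   # shape (2, 1)
    centered = a - med                          # broadcasts row-wise
    return med, centered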
def _median(a, axis=None, out=None, overwrite_input=False):
    # can't reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
if axis is not None and axis >= a.ndim:
raise IndexError(
"axis %d out of bounds (%d)" % (axis, a.ndim))
if overwrite_input:
if axis is None:
part = a.ravel()
sz = part.size
if sz % 2 == 0:
szh = sz // 2
part.partition((szh - 1, szh))
else:
part.partition((sz - 1) // 2)
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
a.partition((szh - 1, szh), axis=axis)
else:
a.partition((sz - 1) // 2, axis=axis)
part = a
else:
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
part = partition(a, ((sz // 2) - 1, sz // 2), axis=axis)
else:
part = partition(a, (sz - 1) // 2, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
axis : int or sequence of int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentiles along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
percentile. This will save memory when you do not need to preserve
the contents of the input array. In this case you should not make
any assumptions about the content of the passed in array `a` after
this function completes -- treat it as undefined. Default is False.
Note that, if the `a` input is not already an array this parameter
will have no effect, `a` will be converted to an array internally
regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If a single percentile `q` is given and axis=None a scalar is
returned. If multiple percentiles `q` are given an array holding
the result is returned. The results are listed in the first axis.
(If `out` is specified, in which case that array is returned
instead). If the input contains integers, or floats of smaller
precision than 64, then the output data-type is float64. Otherwise,
the output data-type is the same as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the q-th percentile of V is the q-th ranked
value in a sorted copy of V. The values and distances of the two
nearest neighbors as well as the `interpolation` parameter will
determine the percentile if the normalized ranking does not match q
exactly. This function is the same as the median if ``q=50``, the same
as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
array([ 3.5])
>>> np.percentile(a, 50, axis=0)
array([[ 6.5, 4.5, 2.5]])
>>> np.percentile(a, 50, axis=1)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([[ 6.5, 4.5, 2.5]])
>>> m
array([[ 6.5, 4.5, 2.5]])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([[ 7.],
[ 2.]])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
3.5
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# promote 0-d q to 1-d, because the code below does not handle scalars
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
# prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = floor(indices) + 0.5
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
raise ValueError(
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
if indices.dtype == intp: # take the points along axis
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
return r
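# --- Illustrative sketch (editor's addition, not part of the original NumPy source) ---
# The five `interpolation` modes above only differ when the requested quantile
# falls between two data points. Assuming a NumPy contemporary with this code
# (the keyword is `interpolation` here; newer releases call it `method`), this
# prints the value each mode picks for a fractional index of 1.2:
def _demo_percentile_interpolation():
    import numpy as np
    data = np.array([1.0, 2.0, 3.0, 4.0])
    # q=40 -> index = 0.40 * (4 - 1) = 1.2, i.e. between data[1]=2 and data[2]=3
    for mode in ('linear', 'lower', 'higher', 'midpoint', 'nearest'):
        print(mode, np.percentile(data, 40, interpolation=mode))
    # expected: linear 2.2, lower 2.0, higher 3.0, midpoint 2.5, nearest 2.0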
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
The axis along which to integrate. Default is the last axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates the trapezoidal rule -- y-axis locations of points
are taken from the `y` array; by default the x-axis distances between
points are 1.0, but they can alternatively be provided via the `x` array
or the `dx` scalar. The return value equals the combined area under
the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
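# --- Illustrative sketch (editor's addition, not part of the original NumPy source) ---
# trapz builds the composite trapezoid sum from two shifted slices of y. This
# shows the slicing formula matches np.trapz for non-uniform spacing; it assumes
# a NumPy version where the function is still named trapz (renamed trapezoid later).
def _demo_trapz_slices():
    import numpy as np
    x = np.array([0.0, 0.5, 1.5, 3.0])
    y = x ** 2
    d = np.diff(x)
    manual = (d * (y[1:] + y[:-1]) / 2.0).sum()  # same formula as the slices above
    assert np.isclose(manual, np.trapz(y, x))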
# always succeeds
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, such as those
found on new-style classes or built-in functions. Because this
routine never raises an error, the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, views into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
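# --- Illustrative sketch (editor's addition, not part of the original NumPy source) ---
# A quick check of the two indexing conventions described in the meshgrid
# docstring: for inputs of length M and N, 'xy' yields (N, M) arrays while
# 'ij' yields (M, N) arrays, and the two are transposes of each other.
def _demo_meshgrid_indexing():
    import numpy as np
    x = np.linspace(0, 1, 3)  # M = 3
    y = np.linspace(0, 1, 2)  # N = 2
    xv_xy, yv_xy = np.meshgrid(x, y, indexing='xy')
    xv_ij, yv_ij = np.meshgrid(x, y, indexing='ij')
    assert xv_xy.shape == (2, 3) and xv_ij.shape == (3, 2)
    assert np.array_equal(xv_xy, xv_ij.T) and np.array_equal(yv_xy, yv_ij.T)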
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
# copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
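# --- Illustrative sketch (editor's addition, not part of the original NumPy source) ---
# The delete docstring recommends a boolean mask when the mask will be reused;
# this verifies the equivalence it describes, using only NumPy's public API.
def _demo_delete_vs_mask():
    import numpy as np
    arr = np.arange(10) * 10
    mask = np.ones(len(arr), dtype=bool)
    mask[[0, 2, 4]] = False
    assert np.array_equal(arr[mask], np.delete(arr, [0, 2, 4], axis=0))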
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays together.
delete : Delete elements from an array.
Notes
-----
Note that for higher dimensional inserts `obj=0` behaves very differently
from `obj=[0]`, just as `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
# broadcasting is very different here, since a[:,0,:] = ... behaves
# very differently from a[:,[0],:] = ...! This changes values so that
# it works like the second case (here a[:,0:1,:]).
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
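# --- Illustrative sketch (editor's addition, not part of the original NumPy source) ---
# The multi-insert branch above offsets the stably sorted indices by
# arange(numnew) so several values can be placed in a single pass. Seen from
# the public API, repeated indices therefore insert side by side:
def _demo_multi_insert():
    import numpy as np
    b = np.array([1, 1, 2, 2, 3, 3])
    out = np.insert(b, [2, 2, 5], [10, 11, 12])
    assert np.array_equal(out, [1, 1, 10, 11, 2, 2, 3, 12, 3])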
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| mit |
fabianp/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
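# --- Illustrative usage sketch (editor's addition, not part of the original scikit-learn source) ---
# A minimal LSA pipeline as described in the class docstring: tf-idf features
# reduced with TruncatedSVD. The corpus and component count are made up for
# illustration; only public scikit-learn APIs are used.
def _demo_lsa():
    from sklearn.feature_extraction.text import TfidfVectorizer
    docs = ["the cat sat on the mat",
            "the dog sat on the log",
            "cats and dogs are pets"]
    X = TfidfVectorizer().fit_transform(docs)   # sparse (3, n_features)
    svd = TruncatedSVD(n_components=2, random_state=0)
    X_lsa = svd.fit_transform(X)                # dense (3, 2)
    return X_lsa, svd.explained_variance_ratio_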
| bsd-3-clause |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/tests/plotting/common.py | 7 | 19550 | #!/usr/bin/env python
# coding: utf-8
import nose
import os
import warnings
from pandas import DataFrame, Series
from pandas.compat import zip, iteritems
from pandas.util.decorators import cache_readonly
from pandas.types.api import is_list_like
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_is_valid_plot_return_object)
import numpy as np
from numpy import random
import pandas.tools.plotting as plotting
"""
This is a common base class used for various plotting tests
"""
def _skip_if_no_scipy_gaussian_kde():
try:
from scipy.stats import gaussian_kde # noqa
except ImportError:
raise nose.SkipTest("scipy version doesn't support gaussian_kde")
def _ok_for_gaussian_kde(kind):
if kind in ['kde', 'density']:
try:
from scipy.stats import gaussian_kde # noqa
except ImportError:
return False
return True
@tm.mplskip
class TestPlotBase(tm.TestCase):
def setUp(self):
import matplotlib as mpl
mpl.rcdefaults()
self.mpl_le_1_2_1 = plotting._mpl_le_1_2_1()
self.mpl_ge_1_3_1 = plotting._mpl_ge_1_3_1()
self.mpl_ge_1_4_0 = plotting._mpl_ge_1_4_0()
self.mpl_ge_1_5_0 = plotting._mpl_ge_1_5_0()
self.mpl_ge_2_0_0 = plotting._mpl_ge_2_0_0()
if self.mpl_ge_1_4_0:
self.bp_n_objects = 7
else:
self.bp_n_objects = 8
if self.mpl_ge_1_5_0:
# 1.5 added PolyCollections to legend handler
# so we have twice as many items.
self.polycollection_factor = 2
else:
self.polycollection_factor = 1
if self.mpl_ge_2_0_0:
self.default_figsize = (6.4, 4.8)
else:
self.default_figsize = (8.0, 6.0)
self.default_tick_position = 'left' if self.mpl_ge_2_0_0 else 'default'
# common test data
from pandas import read_csv
path = os.path.join(os.path.dirname(curpath()), 'data', 'iris.csv')
self.iris = read_csv(path)
n = 100
with tm.RNGContext(42):
gender = np.random.choice(['Male', 'Female'], size=n)
classroom = np.random.choice(['A', 'B', 'C'], size=n)
self.hist_df = DataFrame({'gender': gender,
'classroom': classroom,
'height': random.normal(66, 4, size=n),
'weight': random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(
size=20)})
def tearDown(self):
tm.close()
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
@cache_readonly
def colorconverter(self):
import matplotlib.colors as colors
return colors.colorConverter
def _check_legend_labels(self, axes, labels=None, visible=True):
"""
Check each axes has expected legend labels
Parameters
----------
axes : matplotlib Axes object, or its list-like
labels : list-like
expected legend labels
visible : bool
expected legend visibility. labels are checked only when visible is
True
"""
if visible and (labels is None):
raise ValueError('labels must be specified when visible is True')
axes = self._flatten_visible(axes)
for ax in axes:
if visible:
self.assertTrue(ax.get_legend() is not None)
self._check_text_labels(ax.get_legend().get_texts(), labels)
else:
self.assertTrue(ax.get_legend() is None)
def _check_data(self, xp, rs):
"""
Check each axes has identical lines
Parameters
----------
xp : matplotlib Axes object
rs : matplotlib Axes object
"""
xp_lines = xp.get_lines()
rs_lines = rs.get_lines()
def check_line(xpl, rsl):
xpdata = xpl.get_xydata()
rsdata = rsl.get_xydata()
tm.assert_almost_equal(xpdata, rsdata)
self.assertEqual(len(xp_lines), len(rs_lines))
[check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]
tm.close()
def _check_visible(self, collections, visible=True):
"""
Check each artist is visible or not
Parameters
----------
collections : matplotlib Artist or its list-like
target Artist or its list or collection
visible : bool
expected visibility
"""
from matplotlib.collections import Collection
if not isinstance(collections,
Collection) and not is_list_like(collections):
collections = [collections]
for patch in collections:
self.assertEqual(patch.get_visible(), visible)
def _get_colors_mapped(self, series, colors):
unique = series.unique()
# the lengths of unique and colors can differ
# depending on the slice value
mapped = dict(zip(unique, colors))
return [mapped[v] for v in series.values]
def _check_colors(self, collections, linecolors=None, facecolors=None,
mapping=None):
"""
Check each artist has expected line colors and face colors
Parameters
----------
collections : list-like
list or collection of target artist
linecolors : list-like which has the same length as collections
list of expected line colors
facecolors : list-like which has the same length as collections
list of expected face colors
mapping : Series
Series used for color grouping key
used for the andrews_curves, parallel_coordinates and radviz tests
"""
from matplotlib.lines import Line2D
from matplotlib.collections import (
Collection, PolyCollection, LineCollection
)
conv = self.colorconverter
if linecolors is not None:
if mapping is not None:
linecolors = self._get_colors_mapped(mapping, linecolors)
linecolors = linecolors[:len(collections)]
self.assertEqual(len(collections), len(linecolors))
for patch, color in zip(collections, linecolors):
if isinstance(patch, Line2D):
result = patch.get_color()
# Line2D may contain a string color expression
result = conv.to_rgba(result)
elif isinstance(patch, (PolyCollection, LineCollection)):
result = tuple(patch.get_edgecolor()[0])
else:
result = patch.get_edgecolor()
expected = conv.to_rgba(color)
self.assertEqual(result, expected)
if facecolors is not None:
if mapping is not None:
facecolors = self._get_colors_mapped(mapping, facecolors)
facecolors = facecolors[:len(collections)]
self.assertEqual(len(collections), len(facecolors))
for patch, color in zip(collections, facecolors):
if isinstance(patch, Collection):
# returned as list of np.array
result = patch.get_facecolor()[0]
else:
result = patch.get_facecolor()
if isinstance(result, np.ndarray):
result = tuple(result)
expected = conv.to_rgba(color)
self.assertEqual(result, expected)
def _check_text_labels(self, texts, expected):
"""
Check each text has expected labels
Parameters
----------
texts : matplotlib Text object, or its list-like
target text, or its list
expected : str or list-like which has the same length as texts
expected text label, or its list
"""
if not is_list_like(texts):
self.assertEqual(texts.get_text(), expected)
else:
labels = [t.get_text() for t in texts]
self.assertEqual(len(labels), len(expected))
for l, e in zip(labels, expected):
self.assertEqual(l, e)
def _check_ticks_props(self, axes, xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None):
"""
Check each axes has expected tick properties
Parameters
----------
axes : matplotlib Axes object, or its list-like
xlabelsize : number
expected xticks font size
xrot : number
expected xticks rotation
ylabelsize : number
expected yticks font size
yrot : number
expected yticks rotation
"""
from matplotlib.ticker import NullFormatter
axes = self._flatten_visible(axes)
for ax in axes:
if xlabelsize or xrot:
if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter):
# If the minor ticks use a NullFormatter, rot / fontsize are not
# retained
labels = ax.get_xticklabels()
else:
labels = ax.get_xticklabels() + ax.get_xticklabels(
minor=True)
for label in labels:
if xlabelsize is not None:
self.assertAlmostEqual(label.get_fontsize(),
xlabelsize)
if xrot is not None:
self.assertAlmostEqual(label.get_rotation(), xrot)
if ylabelsize or yrot:
if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter):
labels = ax.get_yticklabels()
else:
labels = ax.get_yticklabels() + ax.get_yticklabels(
minor=True)
for label in labels:
if ylabelsize is not None:
self.assertAlmostEqual(label.get_fontsize(),
ylabelsize)
if yrot is not None:
self.assertAlmostEqual(label.get_rotation(), yrot)
def _check_ax_scales(self, axes, xaxis='linear', yaxis='linear'):
"""
Check each axes has expected scales
Parameters
----------
axes : matplotlib Axes object, or its list-like
xaxis : {'linear', 'log'}
expected xaxis scale
yaxis : {'linear', 'log'}
expected yaxis scale
"""
axes = self._flatten_visible(axes)
for ax in axes:
self.assertEqual(ax.xaxis.get_scale(), xaxis)
self.assertEqual(ax.yaxis.get_scale(), yaxis)
def _check_axes_shape(self, axes, axes_num=None, layout=None,
figsize=None):
"""
Check expected number of axes is drawn in expected layout
Parameters
----------
axes : matplotlib Axes object, or its list-like
axes_num : number
expected number of axes. Unnecessary axes should be set to
invisible.
layout : tuple
expected layout, as (number of rows, number of columns)
figsize : tuple
expected figsize. default is matplotlib default
"""
if figsize is None:
figsize = self.default_figsize
visible_axes = self._flatten_visible(axes)
if axes_num is not None:
self.assertEqual(len(visible_axes), axes_num)
for ax in visible_axes:
# check something drawn on visible axes
self.assertTrue(len(ax.get_children()) > 0)
if layout is not None:
result = self._get_axes_layout(plotting._flatten(axes))
self.assertEqual(result, layout)
self.assert_numpy_array_equal(
visible_axes[0].figure.get_size_inches(),
np.array(figsize, dtype=np.float64))
def _get_axes_layout(self, axes):
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
def _flatten_visible(self, axes):
"""
Flatten axes, and filter only visible
Parameters
----------
axes : matplotlib Axes object, or its list-like
"""
axes = plotting._flatten(axes)
axes = [ax for ax in axes if ax.get_visible()]
return axes
def _check_has_errorbars(self, axes, xerr=0, yerr=0):
"""
Check axes has expected number of errorbars
Parameters
----------
axes : matplotlib Axes object, or its list-like
xerr : number
expected number of x errorbar
yerr : number
expected number of y errorbar
"""
axes = self._flatten_visible(axes)
for ax in axes:
containers = ax.containers
xerr_count = 0
yerr_count = 0
for c in containers:
has_xerr = getattr(c, 'has_xerr', False)
has_yerr = getattr(c, 'has_yerr', False)
if has_xerr:
xerr_count += 1
if has_yerr:
yerr_count += 1
self.assertEqual(xerr, xerr_count)
self.assertEqual(yerr, yerr_count)
def _check_box_return_type(self, returned, return_type, expected_keys=None,
check_ax_title=True):
"""
Check box returned type is correct
Parameters
----------
returned : object to be tested, returned from boxplot
return_type : str
return_type passed to boxplot
expected_keys : list-like, optional
group labels in the subplot case. If not passed,
the function checks assuming boxplot uses a single ax
check_ax_title : bool
Whether to check that the ax.title is the same as expected_key.
Intended to be checked when calling from ``boxplot``.
Normal ``plot`` doesn't attach ``ax.title``, so it must be disabled there.
"""
from matplotlib.axes import Axes
types = {'dict': dict, 'axes': Axes, 'both': tuple}
if expected_keys is None:
# should be fixed when the returning default is changed
if return_type is None:
return_type = 'dict'
self.assertTrue(isinstance(returned, types[return_type]))
if return_type == 'both':
self.assertIsInstance(returned.ax, Axes)
self.assertIsInstance(returned.lines, dict)
else:
# should be fixed when the returning default is changed
if return_type is None:
for r in self._flatten_visible(returned):
self.assertIsInstance(r, Axes)
return
self.assertTrue(isinstance(returned, Series))
self.assertEqual(sorted(returned.keys()), sorted(expected_keys))
for key, value in iteritems(returned):
self.assertTrue(isinstance(value, types[return_type]))
# check returned dict has correct mapping
if return_type == 'axes':
if check_ax_title:
self.assertEqual(value.get_title(), key)
elif return_type == 'both':
if check_ax_title:
self.assertEqual(value.ax.get_title(), key)
self.assertIsInstance(value.ax, Axes)
self.assertIsInstance(value.lines, dict)
elif return_type == 'dict':
line = value['medians'][0]
axes = line.axes if self.mpl_ge_1_5_0 else line.get_axes()
if check_ax_title:
self.assertEqual(axes.get_title(), key)
else:
raise AssertionError
def _check_grid_settings(self, obj, kinds, kws={}):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
import matplotlib as mpl
def is_grid_on():
xoff = all(not g.gridOn
for g in self.plt.gca().xaxis.get_major_ticks())
yoff = all(not g.gridOn
for g in self.plt.gca().yaxis.get_major_ticks())
return not (xoff and yoff)
spndx = 1
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
mpl.rc('axes', grid=False)
obj.plot(kind=kind, **kws)
self.assertFalse(is_grid_on())
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
mpl.rc('axes', grid=True)
obj.plot(kind=kind, grid=False, **kws)
self.assertFalse(is_grid_on())
if kind != 'pie':
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
mpl.rc('axes', grid=True)
obj.plot(kind=kind, **kws)
self.assertTrue(is_grid_on())
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
mpl.rc('axes', grid=False)
obj.plot(kind=kind, grid=True, **kws)
self.assertTrue(is_grid_on())
def _maybe_unpack_cycler(self, rcParams, field='color'):
"""
Compat layer for MPL 1.5 change to color cycle
Before: plt.rcParams['axes.color_cycle'] -> ['b', 'g', 'r'...]
After : plt.rcParams['axes.prop_cycle'] -> cycler(...)
"""
if self.mpl_ge_1_5_0:
cyl = rcParams['axes.prop_cycle']
colors = [v[field] for v in cyl]
else:
colors = rcParams['axes.color_cycle']
return colors
def _check_plot_works(f, filterwarnings='always', **kwargs):
import matplotlib.pyplot as plt
ret = None
with warnings.catch_warnings():
warnings.simplefilter(filterwarnings)
try:
try:
fig = kwargs['figure']
except KeyError:
fig = plt.gcf()
plt.clf()
ax = kwargs.get('ax', fig.add_subplot(211)) # noqa
ret = f(**kwargs)
assert_is_valid_plot_return_object(ret)
try:
kwargs['ax'] = fig.add_subplot(212)
ret = f(**kwargs)
except Exception:
pass
else:
assert_is_valid_plot_return_object(ret)
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
finally:
tm.close(fig)
return ret
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
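# --- Illustrative sketch (editor's addition, not part of the original pandas source) ---
# How a plotting test typically leans on the helpers above: subclass
# TestPlotBase, draw something, then call the _check_* methods. The helper
# names are real; the test itself is a made-up example.
class _ExampleSeriesPlotTest(TestPlotBase):
    def test_line_legend(self):
        ax = Series([1, 2, 3], name='speed').plot(legend=True)
        self._check_legend_labels(ax, labels=['speed'])
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1))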
| mit |
pkuyym/Paddle | python/paddle/dataset/uci_housing.py | 4 | 4380 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
UCI Housing dataset.
This module will download the dataset from
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and
parse the training set and test set into paddle reader creators.
"""
import os
import numpy as np
import tempfile
import tarfile
import paddle.dataset.common
__all__ = ['train', 'test']
URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data'
MD5 = 'd4accdce7a25600298819f8e28e8d593'
feature_names = [
'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',
'PTRATIO', 'B', 'LSTAT', 'convert'
]
UCI_TRAIN_DATA = None
UCI_TEST_DATA = None
FLUID_URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/fluid/fit_a_line.fluid.tar'
FLUID_MD5_MODEL = '6e6dd637ccd5993961f68bfbde46090b'
def feature_range(maximums, minimums):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
feature_num = len(maximums)
ax.bar(range(feature_num), maximums - minimums, color='r', align='center')
ax.set_title('feature scale')
plt.xticks(range(feature_num), feature_names)
plt.xlim([-1, feature_num])
fig.set_figheight(6)
fig.set_figwidth(10)
if not os.path.exists('./image'):
os.makedirs('./image')
fig.savefig('image/ranges.png', dpi=48)
plt.close(fig)
def load_data(filename, feature_num=14, ratio=0.8):
global UCI_TRAIN_DATA, UCI_TEST_DATA
if UCI_TRAIN_DATA is not None and UCI_TEST_DATA is not None:
return
data = np.fromfile(filename, sep=' ')
data = data.reshape(data.shape[0] / feature_num, feature_num)
maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum(
axis=0) / data.shape[0]
feature_range(maximums[:-1], minimums[:-1])
for i in xrange(feature_num - 1):
data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])
offset = int(data.shape[0] * ratio)
UCI_TRAIN_DATA = data[:offset]
UCI_TEST_DATA = data[offset:]
def train():
"""
UCI_HOUSING training set creator.
It returns a reader creator, each sample in the reader is features after
normalization and price number.
:return: Training reader creator
:rtype: callable
"""
global UCI_TRAIN_DATA
load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
def reader():
for d in UCI_TRAIN_DATA:
yield d[:-1], d[-1:]
return reader
def test():
"""
UCI_HOUSING test set creator.
It returns a reader creator, each sample in the reader is features after
normalization and price number.
:return: Test reader creator
:rtype: callable
"""
global UCI_TEST_DATA
load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
def reader():
for d in UCI_TEST_DATA:
yield d[:-1], d[-1:]
return reader
def fluid_model():
parameter_tar = paddle.dataset.common.download(
FLUID_URL_MODEL, 'uci_housing', FLUID_MD5_MODEL, 'fit_a_line.fluid.tar')
tar = tarfile.TarFile(parameter_tar, mode='r')
dirpath = tempfile.mkdtemp()
tar.extractall(path=dirpath)
return dirpath
def predict_reader():
"""
It returns just one tuple data to do inference.
:return: one tuple data
:rtype: tuple
"""
global UCI_TEST_DATA
load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
return (UCI_TEST_DATA[0][:-1], )
def fetch():
paddle.dataset.common.download(URL, 'uci_housing', MD5)
def convert(path):
"""
Converts dataset to recordio format
"""
paddle.dataset.common.convert(path, train(), 1000, "uci_housing_train")
paddle.dataset.common.convert(path, test(), 1000, "uci_housing_test")
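# --- Illustrative usage sketch (editor's addition, not part of the original Paddle source) ---
# train() and test() return reader creators: calling one yields a generator of
# (features, price) pairs. This batches a few samples; note that load_data()
# downloads the dataset on first use.
def _demo_reader(batch_size=4):
    reader = train()  # reader creator defined above
    batch = []
    for features, price in reader():
        batch.append((features, price))
        if len(batch) == batch_size:
            break
    return batch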
| apache-2.0 |
joshgabriel/MPInterfaces | examples/bands_dos.py | 2 | 7987 | # coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
"""
Reads in KPOINTS (with labels) and vasprun.xml files and
plots the band diagram and density of states.
Adapted from http://gvallver.perso.univ-pau.fr/?p=587
"""
from six.moves import range
from six.moves import zip
import numpy as np
try:
# To use matplotlib on Hipergator, uncomment the following 2 lines:
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.gridspec import GridSpec
except ImportError:
print("Install matplotlib")
plt = None
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.electronic_structure.core import Spin
def rgbline(ax, k, e, red, green, blue, alpha=1.):
# creation of segments based on
# http://nbviewer.ipython.org/urls/raw.github.com/dpsanders/matplotlib-examples/master/colorline.ipynb
# amount of r,g,b weighted by the s, p, d contribution to the density
pts = np.array([k, e]).T.reshape(-1, 1, 2)
seg = np.concatenate([pts[:-1], pts[1:]], axis=1)
nseg = len(k) - 1
r = [0.5 * (red[i] + red[i + 1]) for i in range(nseg)]
g = [0.5 * (green[i] + green[i + 1]) for i in range(nseg)]
b = [0.5 * (blue[i] + blue[i + 1]) for i in range(nseg)]
a = np.ones(nseg, np.float) * alpha
lc = LineCollection(seg, colors=list(zip(r, g, b, a)), linewidth=2)
ax.add_collection(lc)
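# --- Illustrative sketch (editor's addition, not part of the original example) ---
# rgbline() colours each band segment by the normalized s/p/d weights passed as
# red/green/blue. A minimal call with made-up weights on a bare Axes, assuming
# the matplotlib/NumPy versions this script targets (rgbline uses the old
# np.float alias internally):
def _demo_rgbline():
    fig, ax = plt.subplots()
    k = list(range(5))
    e = [0.0, 0.5, 0.3, 0.8, 1.0]
    w = np.linspace(0, 1, 5)
    rgbline(ax, k, e, red=w, green=1 - w, blue=np.zeros(5))
    ax.set_xlim(0, 4)
    ax.set_ylim(-0.1, 1.1)
    return fig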
if __name__ == "__main__":
# --------------------------------------------------------------
# read data
# --------------------------------------------------------------
# readin bandstructure and density of states from vasprun.xml file
run = Vasprun("vasprun.xml", parse_projected_eigen=True)
bands = run.get_band_structure("KPOINTS", line_mode=True,
efermi=run.efermi)
complete_dos = run.complete_dos
print('cbm and vbm ', complete_dos.get_cbm_vbm())
print('gap = ', complete_dos.get_gap())
# get orbital projected DOS.
spd_dos = complete_dos.get_spd_dos()
# kpoints labels, must conform with the label in the KPOINTS file
labels = [r"$G$", r"$X$", r"$M$", r"$G$"]
# --------------------------------------------------------------
# compute a dictionary of projections on elements and specific orbitals
# A dictionary of Elements and Orbitals on which we want
# projections. It is given as {Element: [orbitals]},
# e.g., {'Cu':['d','s']}
# --------------------------------------------------------------
pbands = bands.get_projections_on_elts_and_orbitals(
{"Fe": ["s", "p", "d"]})
contrib = np.zeros((bands.nb_bands, len(bands.kpoints), 3))
# loop over the bands and kpoints
for b in range(bands.nb_bands):
for k in range(len(bands.kpoints)):
sc = pbands[Spin.up][b][k]["Fe"]["s"] ** 2
pc = pbands[Spin.up][b][k]["Fe"]["p"] ** 2
dc = pbands[Spin.up][b][k]["Fe"]["d"] ** 2
tot = sc + pc + dc
if tot != 0.0:
contrib[b, k, 0] = sc / tot # normalized s contribution
contrib[b, k, 1] = pc / tot # normalized p contribution
contrib[b, k, 2] = dc / tot # normalized d contribution
# --------------------------------------------------------------
# set up matplotlib plot
# --------------------------------------------------------------
# general options for plot
font = {'family': 'serif', 'size': 24}
plt.rc('font', **font)
# set up 2 graphs with aspect ratio 2/1
# plot 1: bands diagram
# plot 2: Density of States
gs = GridSpec(1, 2, width_ratios=[2, 1])
fig = plt.figure(figsize=(11.69, 8.27))
fig.suptitle("HSE Band Structure of FeS-litharge")
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1]) # , sharey=ax1)
# --------------------------------------------------------------
# energies on the y axis
# set ylimit for the plot
# max and min energy values computed from the read in data
# --------------------------------------------------------------
emin = 1e100
emax = -1e100
for spin in bands.bands.keys():
for b in range(bands.nb_bands):
emin = min(emin, min(bands.bands[spin][b]))
emax = max(emax, max(bands.bands[spin][b]))
emin -= bands.efermi + 1
emax -= bands.efermi - 1
emin1 = -5
emax1 = 5
ax1.set_ylim(emin1, emax1)
ax2.set_ylim(emin, emax)
# --------------------------------------------------------------
# Band Diagram
# --------------------------------------------------------------
# plot bands using rgb mapping
for b in range(bands.nb_bands):
rgbline(ax1, list(range(len(bands.kpoints))),
[e - bands.efermi for e in bands.bands[Spin.up][b]],
contrib[b, :, 0], contrib[b, :, 1], contrib[b, :, 2])
rgbline(ax1, list(range(len(bands.kpoints))),
[e - bands.efermi for e in bands.bands[Spin.down][b]],
contrib[b, :, 0], contrib[b, :, 1], contrib[b, :, 2],
alpha=0.2)
# ax1.set_title("Band diagram")
ax1.set_xlim(0, len(bands.kpoints))
ax1.set_xlabel("k-points")
ax1.set_ylabel(r"$E - E_f$ / eV")
ax1.grid()
# fermi level at 0, draw a line through it
ax1.hlines(y=0, xmin=0, xmax=len(bands.kpoints), color="k", lw=2)
# kpoint labels
# draw vertical lines along each kpoint label
# and put tick marks
nlabs = len(labels)
step = len(bands.kpoints) / (nlabs - 1)
for i, lab in enumerate(labels):
ax1.vlines(i * step, emin, emax, "k")
ax1.set_xticks([i * step for i in range(nlabs)])
ax1.set_xticklabels(labels)
# --------------------------------------------------------------
# Density of states
# --------------------------------------------------------------
# on the x-axis we have the density of states
# to plot both spin-up and spin-down densities in the same plot,
# set the xlimit from a negative to a positive value
# ax2.set_title("Density of States")
ax2.set_xlim(-10, 10)
ax2.set_xticklabels([])
ax2.set_xlabel("Density of States", labelpad=28)
ax2.set_yticklabels([])
ax2.grid()
# line through fermi level, E=0
ax2.hlines(y=0, xmin=0, xmax=8, color="k", lw=2)
# plot spd contributions, SPIN UP, positive direction
ax2.plot(spd_dos["S"].densities[Spin.up],
run.tdos.energies - run.efermi,
"r-", label="3s", lw=2)
ax2.plot(spd_dos["P"].densities[Spin.up],
run.tdos.energies - run.efermi,
"g-", label="3p", lw=2)
ax2.plot(spd_dos["D"].densities[Spin.up],
run.tdos.energies - run.efermi,
"b-", label="3d", lw=2)
# plot spd contribution, SPIN DOWN, negative direction
ax2.plot(-spd_dos["S"].densities[Spin.down],
run.tdos.energies - run.efermi,
"r-", label="3s", lw=2)
ax2.plot(-spd_dos["P"].densities[Spin.down],
run.tdos.energies - run.efermi,
"g-", label="3p", lw=2)
ax2.plot(-spd_dos["D"].densities[Spin.down],
run.tdos.energies - run.efermi,
"b-", label="3d", lw=2)
# plot total dos
total_dos = run.tdos.densities[Spin.up] + run.tdos.densities[Spin.down]
ax2.fill_between(total_dos, 0, run.tdos.energies - run.efermi,
color=(0.7, 0.7, 0.7), facecolor=(0.7, 0.7, 0.7))
ax2.plot(total_dos, run.tdos.energies - run.efermi,
color=(0.3, 0.3, 0.3), label="total DOS")
# set legend
# ax2.legend(fancybox=True, shadow=True, prop={'size':18})
plt.subplots_adjust(wspace=0)
# save plot as pdf
# plt.show()
plt.savefig("band_dos_up_down.pdf", format="pdf")
| mit |
olologin/scikit-learn | sklearn/metrics/scorer.py | 23 | 13077 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.model_selection.GridSearchCV` or
:func:`sklearn.model_selection.cross_val_score` as the ``scoring``
parameter, to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
                    y_pred = np.vstack([p for p in y_pred]).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
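# Usage sketch (illustrative, not part of the original module): resolving a
# scorer name registered in the SCORERS dict defined at the bottom of this
# file.
#
#   scorer = get_scorer('roc_auc')
#   # scorer(fitted_classifier, X_test, y_test) returns the ROC AUC as a
#   # float; an unknown name raises ValueError listing the valid options.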
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
    ``mean_squared_error``, ``adjusted_rand_score`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
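# Usage sketch (illustrative, not from the original source): wrapping a custom
# loss and a probability-based metric. `my_squared_loss` is a hypothetical
# user-defined function.
#
#   import numpy as np
#   from sklearn.metrics import make_scorer, log_loss
#
#   def my_squared_loss(y_true, y_pred):
#       return np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2)
#
#   loss_scorer = make_scorer(my_squared_loss, greater_is_better=False)
#   proba_scorer = make_scorer(log_loss, greater_is_better=False,
#                              needs_proba=True)
#   # Both return callables with signature scorer(estimator, X, y); the sign
#   # flip makes "greater is better" hold for losses during model selection.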
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(metric, pos_label=None,
average=average)
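# Quick sketch (illustrative) combining the registry above with the helpers
# defined earlier in this module:
#
#   macro_f1 = get_scorer('f1_macro')   # registered by the loop above;
#   # equivalent to make_scorer(f1_score, pos_label=None, average='macro').
#   # check_scoring(clf, scoring='f1_macro') resolves to the same scorer,
#   # while check_scoring(clf) falls back to clf.score via _passthrough_scorer.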
| bsd-3-clause |
GeoscienceAustralia/PyRate | docs/conf.py | 1 | 5820 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import datetime
import sphinx_rtd_theme
__version__ = "0.5.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'matplotlib.sphinxext.plot_directive',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'sphinxcontrib.programoutput'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
from recommonmark.parser import CommonMarkParser
source_parsers = {'.md': CommonMarkParser}
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The main toctree document.
main_doc = 'index'
# General information about the project.
project = 'PyRate'
copyright = str(datetime.datetime.now().year)+', Geoscience Australia'
author = 'Geoscience Australia InSAR Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.4.0"
# The full version, including alpha/beta/rc tags.
release = "0.4.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_logo = "PyRate_logo.png"
html_show_sourcelink = False
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'collapse_navigation': False,
'sticky_navigation': True,
'display_version': False,
# 'titles_only': True,
'navigation_depth': 2,
'logo_only': True,
'prev_next_buttons_location': 'top',
'style_nav_header_background': 'white'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [os.path.join('_build', 'html', '_static')]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyRatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(main_doc, 'PyRate.tex', 'PyRate Documentation',
'Geoscience Australia InSAR team', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(main_doc, 'pyrate', 'PyRate Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(main_doc, 'PyRate', 'PyRate Documentation',
author, 'PyRate', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# link check ignore
linkcheck_ignore = [r'http://localhost:\d+/',
'https://github.com/Nekroze/PyRate/fork']
def setup(app):
app.add_stylesheet('css/custom.css')
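# Hedged note (not part of the original conf.py): with this configuration the
# HTML pages are conventionally built from the docs directory with
#
#   sphinx-build -b html . _build/html
#
# (or an equivalent `make html` target); the exact source/output paths are
# assumptions and may differ from the project's actual build scripts.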
| apache-2.0 |
DSLituiev/scikit-learn | sklearn/datasets/svmlight_format.py | 19 | 16759 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working
    repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
        tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
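# Usage sketch (illustrative, not part of the original module); the file name
# is hypothetical.
#
#   X, y = load_svmlight_file("data.svmlight")
#   # X is a scipy.sparse CSR matrix, y a 1-D numpy array (or a list of label
#   # tuples when multilabel=True).
#   X_q, y_q, qid = load_svmlight_file("data.svmlight", query_id=True)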
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
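# Usage sketch (illustrative): loading train and test splits together so that
# both matrices get the same number of columns; the file names are
# hypothetical.
#
#   X_train, y_train, X_test, y_test = load_svmlight_files(
#       ("train.svmlight", "test.svmlight"))
#   assert X_train.shape[1] == X_test.shape[1]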
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
X_is_sp = int(hasattr(X, "tocsr"))
y_is_sp = int(hasattr(y, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if X_is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
if y_is_sp:
nz_labels = y[i].nonzero()[1]
else:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
if y_is_sp:
labels_str = label_pattern % y.data[i]
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : {array-like, sparse matrix}, shape = [n_samples (, n_labels)]
Target values. Class labels must be an
integer or float, or array-like objects of integer or float for
multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
.. versionadded:: 0.17
parameter *multilabel* to support multilabel datasets.
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
yval = check_array(y, accept_sparse='csr', ensure_2d=False)
if sp.issparse(yval):
if yval.shape[1] != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples, 1),"
" got %r" % (yval.shape,))
else:
if yval.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (yval.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != yval.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], yval.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if yval is y and hasattr(yval, "sorted_indices"):
y = yval.sorted_indices()
else:
y = yval
if hasattr(y, "sort_indices"):
y.sort_indices()
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
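# Round-trip sketch (illustrative, not part of the original module): dumping a
# small dense matrix and reading it back; the file path is hypothetical.
#
#   import numpy as np
#   X = np.array([[0., 1.5], [2., 0.]])
#   y = np.array([0, 1])
#   dump_svmlight_file(X, y, "tiny.svmlight", zero_based=True)
#   X2, y2 = load_svmlight_file("tiny.svmlight", n_features=2)
#   # X2.toarray() reproduces X; only the non-zero entries are stored in the
#   # file.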
| bsd-3-clause |
kulasinski/porosity-tools | libs_porosity.py | 1 | 12537 | import getopt, os, sys, math
import numpy as np
from scipy.optimize import minimize
from matplotlib import pyplot as plt
def print_help():
print " This code analyzes pore structure of an MD structure in Gromacs format."
print " Usage:"
print " -f <input.gro> # input structure file, in .gro format"
print " <grid.npy> # or previously calculated grid in .npy format"
print " -o <out> # generic name for output files"
print " -d <output> # output directory"
print " -r <float> # resolution of the grid elements in nm, default: 0.01 nm"
print " -m <float> # radius of the probe molecule in nm,"
print " default is 0.14 nm (H2O radius)."
print " If radius is 0 Van der Waals surface is probed."
print " -g <gro|npy|no> # save the grid to .gro file, numpy .npy file,"
print " or don't (default)."
print " -p <y/n> # if your structure is fully periodic, default is yes."
print " -c <y/n> # calculate chord and diameter distribution"
print " -t <x|y|z> # calculate tortuosity in given direction"
def process_input(arguments):
input_structure = 'conf.gro'
output = 'out'
odir = 'output'
spacing = 0.01 # nm
radius = 0.14 # H2O molecule
ifsavegrid = 'no'
ifperiodic = True
ifdistribution = False
tdir = ''
if len(arguments)<2:
print_help()
sys.exit(0)
try:
opts, args = getopt.getopt(arguments[1:], "f:o:d:r:m:g:p:c:t:no_usable")
# print opts
except getopt.GetoptError:
print " Inappropriate use."
print_help()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_help()
sys.exit(0)
elif opt in "-f":
input_structure = arg
elif opt in "-o":
output = arg
elif opt in "-d":
odir = arg
elif opt in "-r":
spacing = float(arg)
if spacing == 0:
print " ERROR: Spacing must be greater than 0! Exiting."
sys.exit(0)
elif opt in "-m":
radius = float(arg)
if radius < 0:
print " ERROR: Radius must be larger or equal zero! Exiting."
sys.exit(0)
elif opt in "-g":
if arg in ('gro','.gro'):
ifsavegrid = 'gro'
elif arg in ('npy','.npy'):
ifsavegrid = 'npy'
            elif arg in ('No','no'):
                ifsavegrid = 'no'
elif opt in "-p":
if arg in ('n','N','No','no'):
ifperiodic = False
elif opt in "-c":
if arg in ('y','Y','yes','Yes'):
ifdistribution = True
elif opt in "-t":
if arg in ('x','X','y','Y','z','Z'):
tdir = arg.lower()
return input_structure, output, odir, spacing, radius, ifsavegrid, ifperiodic, ifdistribution, tdir
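# Illustrative sketch (not part of the original script): the option parsing
# above can be exercised directly with a fake argv list; the file names are
# hypothetical.
#
#   fake_argv = ['porosity.py', '-f', 'conf.gro', '-o', 'run1',
#                '-r', '0.02', '-m', '0.14', '-g', 'npy', '-t', 'x']
#   params = process_input(fake_argv)
#   # -> (input_structure, output, odir, spacing, radius, ifsavegrid,
#   #     ifperiodic, ifdistribution, tdir)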
def read_structure(filename):
print ' Reading',filename,'...\r',
fin = open(filename,'r') # in .gro format without velocities
ll = fin.readlines()
fin.close()
# add here later atom type
N = int(ll[1])
X = float(ll[-1].split()[0])
Y = float(ll[-1].split()[1])
Z = float(ll[-1].split()[2])
# read coordinates and wrap them up to a untilted shape
coords = [None]*3*N
atomtype = [None]*N
for i in range(0,N):
lll = ll[i+2]
atomtype[i] = ll[i+2][9:15].strip()[0]
# dividing modulo is to wrap up the coords
coords[3*i] = float(lll[20:28])%X
coords[3*i+1] = float(lll[28:36])%Y
coords[3*i+2] = float(lll[36:44])%Z
print ' Number of atoms:', N
print ' System size: %.5f x %.5f x %.5f' % (X, Y, Z)
return atomtype, coords, X, Y, Z
def distance(x,y,z,X,Y,Z):
return math.sqrt((x-X)*(x-X)+(y-Y)*(y-Y)+(z-Z)*(z-Z))
def remove_grid_points_pbc(G, atomtype, coords, X,Y,Z, NX,NY,NZ, spacing, radius):
mindistdict = {'H':0.12,'C':0.17,'N':0.155,'O':0.152,'F':0.147,'P':0.18,'S':0.18,'Cl':0.175}
probemol_radius = radius # r=0.14 nm for H2O
overlap = 0.2
for a in range(0,len(coords)/3):
sys.stdout.write(' Calculating porosity grid: %d%%\r' % int(100*float(a)/(len(coords)/3)+1))
sys.stdout.flush()
ax = coords[3*a]
ay = coords[3*a+1]
az = coords[3*a+2]
mindist=mindistdict[atomtype[a]]
dist = (mindist+probemol_radius)*(1-overlap)
for i in range(int((ax-dist)/spacing)-1,int((ax+dist)/spacing)+2):
for j in range(int((ay-dist)/spacing)-1,int((ay+dist)/spacing)+2):
for k in range(int((az-dist)/spacing)-1,int((az+dist)/spacing)+2):
x = i*spacing
y = j*spacing
z = k*spacing
i_= int(x/spacing+0.001)%NX
j_= int(y/spacing+0.001)%NY
k_= int(z/spacing+0.001)%NZ
if G[i_,j_,k_]>0:
if distance(ax,ay,az,x,y,z)<=dist:
G[i_,j_,k_]=0
return G
def remove_grid_points(G, atomtype, coords, X,Y,Z, NX,NY,NZ, spacing, radius):
mindistdict = {'H':0.12,'C':0.17,'N':0.155,'O':0.152,'F':0.147,'P':0.18,'S':0.18,'Cl':0.175}
probemol_radius = radius # r=0.14 nm for H2O
for a in range(0,len(coords)/3):
sys.stdout.write(' Calculating porosity grid: %d%%\r' % int(100*float(a)/(len(coords)/3)+1))
sys.stdout.flush()
ax = coords[3*a]
ay = coords[3*a+1]
az = coords[3*a+2]
mindist=mindistdict[atomtype[a]]
dist = mindist+probemol_radius
for i in range(max(int((ax-dist)/spacing)-1,0),min(int((ax+dist)/spacing)+2,NX)):
for j in range(max(int((ay-dist)/spacing)-1,0),min(int((ay+dist)/spacing)+2,NY)):
for k in range(max(int((az-dist)/spacing)-1,0),min(int((az+dist)/spacing)+2,NZ)):
if G[i,j,k]>0:
x = i*spacing
y = j*spacing
z = k*spacing
if distance(ax,ay,az,x,y,z)<=dist:
G[i,j,k]=0
return G
def save_grid2gro(filename,G,nx,ny,nz,d):
n = 0 # current atom number
N = G.sum() # all atoms
x = 0 # atom coordinates
y = 0
z = 0
fout = open(filename, 'w')
fout.write('GRID: atoms are porosity network elements\n')
fout.write(' %d\n' % N)
for k in range(0,nz):
z = k*d
for j in range(0,ny):
y = j*d
for i in range(0,nx):
x = i*d
if G[i,j,k] != 0:
fout.write(' 1RESX C%s %.3f %.3f %.3f\n' % (str(n+1).rjust(5)[-5:], x,y,z))
n = n + 1
fout.write(' %f %f %f\n' % (nx*d, ny*d, nz*d))
fout.close()
def get_surf(G,NX,NY,NZ,d):
grad = np.gradient(G)
gx,gy,gz = np.abs(grad[0]),np.abs(grad[1]),np.abs(grad[2])
surf = 0
for i in range(NX):
for j in range(NY):
for k in range(NZ):
if gx[i,j,k]>0 or gy[i,j,k]>0 or gz[i,j,k]>0:
surf += 1
return surf*d*d
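# Minimal numeric sketch (illustrative, not from the original code): voxels
# with a non-zero gradient in any direction are counted as surface voxels.
#
#   import numpy as np
#   G = np.zeros((4, 4, 4))
#   G[2:, :, :] = 1               # half solid, half pore along x
#   area = get_surf(G, 4, 4, 4, 0.1)
#   # the voxels on either side of the x interface get a non-zero gradient
#   # (2 * 4 * 4 = 32), so area = 32 * 0.1 * 0.1 = 0.32 (nm^2 if d is in nm).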
def split_chord(ch):
N = ch.shape[0]
grad = ch[1:N]-ch[0:N-1]
start = np.where(grad==-1)[0]
end = np.where(grad== 1)[0]
# print 'chord',ch
l_list = []
for i in range(0,start.shape[0]):
temp = np.where(end>start[i])[0]
if temp.shape[0]>0:
e = temp[0]
l = end[e]-start[i]
l_list.append(l)
# print 'list',l_list
return l_list
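# Worked example (illustrative): split_chord returns the lengths of the runs
# of zeros that are enclosed by ones on both sides; runs touching the chord
# ends are ignored.
#
#   import numpy as np
#   ch = np.array([1, 1, 0, 0, 1, 1, 1, 0, 1])
#   split_chord(ch)   # -> [2, 1]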
def get_chord_vec_full(N,grid,spacing,ch):
ch = int(ch/spacing)
NX,NY,NZ = grid.shape[0],grid.shape[1],grid.shape[2]
chords = np.zeros([N,max(NX,NY,NZ)])
for i in range(0,N):
# toss chord direction
dir = np.random.randint(3)
if dir == 0:
Y = np.random.randint(NY)
Z = np.random.randint(NZ)
chords[i,0:NX] = grid[:,Y,Z]
elif dir == 1:
X = np.random.randint(NX)
Y = np.random.randint(NY-ch)
Z = np.random.randint(NZ)
chords[i,0:NY] = grid[X,:,Z]
else:
X = np.random.randint(NX)
Y = np.random.randint(NY)
Z = np.random.randint(NZ-ch)
chords[i,0:NZ] = grid[X,Y,:]
return chords
def get_pore_distr(chords,spacing):
N = chords.shape[0]
radii = []
for i in range(0,N):
radius = split_chord(chords[i,:])
radii = radii + radius
sys.stdout.write(' Evaluating chords %d%%\r' % int(float(i+1)/N*100))
sys.stdout.flush()
edges = np.arange(-0.5,chords.shape[1]+0.5,1)
p, edges = np.histogram(radii,edges,normed=True)
centers = np.arange(0,chords.shape[1])*spacing
# trim zeros at the end
p = np.trim_zeros(p,'b')
centers = centers[0:p.shape[0]]
return p,centers
def pd2pc(c, *pd):
N = len(pd)
pc = np.zeros(N)
for i in range(1,N): # skip 0
# pc[0:i+1] = pc[0:i+1] + pd[i]*c[0:i+1]/(c[i]*c[i])# p(c) = p(di)*c/c_i^2
pc[0:i+1] = pc[0:i+1] + pd[i]*c[0:i+1]/(c[i]*c[i])# p(c) = p(di)*c/c_i^2
# add penalty
# if pd[i] < 0:
# pc[0:i+1] = pc[0:i+1]+pd[i]*pd[i]*10000 # square the negative pd(i) and increase enormously the p(c)
return pc
def optimize_pd(par0, xdata, ydata):
N = par0.shape[0]
# par, parcov = optimization.curve_fit(pd2pc, xdata, ydata, p0=par0)
err = lambda p: np.mean(((pd2pc(xdata,*p)-ydata)/1)**2)
bounds = []
for i in range(0,N):
bounds.append((-1,None))
par = minimize(err, par0, bounds=bounds, method="L-BFGS-B").x
return np.asarray(par)
def transform(c,pc_copy):
pc = np.copy(pc_copy)
# given is range of chords, c, and its probability, pd = p(c)
N = c.shape[0]
pd = np.zeros(N) # probability of d
if False: # direct calculation of p(d)
for i in range(0,N):
c_curr, pc_curr = c[N-1-i], pc[N-1-i]
if c_curr > 0.0:
pd_curr = pc_curr*c_curr # since p(c) ~ p(d_i)*c / d_i^2 and hence p(c=d_i) ~ p(d_i)/d_i
delta_pc = c[0:N-i]*pd_curr/(c_curr*c_curr)
pc[0:N-i] = pc[0:N-i] - delta_pc # reduced p(c), original
pd[N-1-i] = pd_curr
pd = smooth(pd,10)
# pd = shift2avoid0s(pd)
if True: # least squares
pd = optimize_pd(np.copy(pc_copy), c, pc)
# c, pd = rebin(c,pd,2)
pd = pd/pd.sum() # normalize
return c, pd
def savefig(V,name):
# figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
imgplot = plt.imshow(V) # ,interpolation='none')
plt.colorbar()
plt.savefig('%s.png' % name)
plt.close()
def guessU(U,Nx):
for i in range(0,Nx):
U[i,:,:] = 1-1.0*i/Nx
U[ 0,:,:] = 1
U[-1,:,:] = 0
return U
def equilibrateU(U,grid,Nx,Ny,Nz, plane,odir,output,spacing):
x,y,z = np.arange(0,Nx), np.arange(0,Ny), np.arange(0,Nz)
ftort = open(odir+'/'+output+'_tortuosity-%splane.dat' % plane,'w')
xf,yf,zf = np.mod(x+1,Nx), np.mod(y+1,Ny), np.mod(z+1,Nz) # array shifted forward
xb,yb,zb = np.mod(x-1,Nx), np.mod(y-1,Ny), np.mod(z-1,Nz) # array shifted backward
neighbors = grid[xf,:,:]+grid[xb,:,:]+grid[:,yf,:]+grid[:,yb,:]+grid[:,:,zf]+grid[:,:,zb] # number of non-zero neighbors
# make zero-neighors disappear
arr = np.where(neighbors == 0)
neighbors[arr] = 9999
tortvec = []
# loop over equilibrating U and stop when reached specified accuracy
old_t = 0.0
U[0,:,:] = 1 # just in case
t_prev, t_prev_prev, fluct = 0.,0.,1.
for i in range(0,10*Nx*Ny*Nz): # Nx*Ny*Nz is the limit
Ug = U*grid # by this multiplication ignore the empty sites, i.e. where grid==0
U[1:-1,:,:] = ( Ug[0:-2,:,:] + Ug[2:Nx,:,:] + Ug[1:-1,yb,:] + Ug[1:-1,yf,:] + Ug[1:-1,:,zb] + Ug[1:-1,:,zf] )/neighbors[1:-1,:,:]
I = Ug[-2,:,:].sum()
Rideal = float(Nx-1)/(Ny*Nz)
Reff = 1/I # R = U/I
tortuosity = Reff/Rideal
sys.stdout.write(' (%d%%) ~conductivity^-1: %.4e (fluct %.2f%%) \r ' % (int(10*i/(Nx*Ny*Nz)),tortuosity,100*fluct))
sys.stdout.flush()
if i%500 == 0: # save progress every N steps
fluct = np.std(np.array([tortuosity, t_prev, t_prev_prev]))/tortuosity
            if fluct < 0.01: # i.e. less than 1% fluctuation (0.0001 would be 0.01%)
break
t_prev_prev = t_prev
t_prev = tortuosity
savefig(U[1:,:,:].sum(axis=2)/Nz,odir+'/'+output+'_U-%splane' % (plane))
ftort.write('%d %.4f\n' % (i,tortuosity))
tortvec.append(tortuosity)
plt.plot(np.linspace(0,len(tortvec)-1,len(tortvec)),np.asarray(tortvec),linewidth=4.0,alpha=0.5)
plt.savefig(odir+'/'+output+'_tortuosity-%splane.png' % plane)
plt.close()
Ug = U*grid
plt.plot(np.arange(Nx)*spacing,Ug.sum(axis=2).sum(axis=1)/grid.sum(axis=2).sum(axis=1),'g-',linewidth=4.0,alpha=0.5)
plt.savefig(odir+'/'+output+'_Uprofile-%splane.png' % plane)
plt.close()
ftort.close()
return U
def drawI(U,grid, Nx,Ny,Nz, epsilon):
y, z = np.arange(0,Ny), np.arange(0,Nz)
yf,zf = np.mod(y+1,Ny), np.mod(z+1,Nz) # array shifted forward
yb,zb = np.mod(y-1,Ny), np.mod(z-1,Nz) # array shifted backward
# Ug = U*grid
Imap,Iim,Iip,Ijm,Ijp,Ikm,Ikp = np.zeros([Nx,Ny,Nz]),np.zeros([Nx,Ny,Nz]),np.zeros([Nx,Ny,Nz]),np.zeros([Nx,Ny,Nz]),np.zeros([Nx,Ny,Nz]),np.zeros([Nx,Ny,Nz]),np.zeros([Nx,Ny,Nz])
Iim[1:Nx,:,:] = U[1:Nx,:,:] - U[0:-1,:,:]
Iip[0:-1,:,:] = U[0:-1,:,:] - U[1:Nx,:,:]
Ijm[:,:,:] = U[:,:,:] - U[:,yb,:]
Ijp[:,:,:] = U[:,:,:] - U[:,yf,:]
Ikm[:,:,:] = U[:,:,:] - U[:,:,zb]
Ikp[:,:,:] = U[:,:,:] - U[:,:,zf]
Imap = np.abs(Iim)+np.abs(Iip)+np.abs(Ijm)+np.abs(Ijp)+np.abs(Ikm)+np.abs(Ikp)
Imap[1:-1,:,:] = Imap[1:-1,:,:]/2
Imap = Imap*grid
Imap[np.where(Imap < epsilon)] = 0
eff_porosity = float(np.where(Imap > 0)[0].shape[0])/(Nx*Ny*Nz)
print ' Effective porosity: %.2f%% ' % (100*eff_porosity)
return eff_porosity
| gpl-3.0 |
gnieboer/gnuradio | gr-utils/python/utils/plot_psd_base.py | 75 | 12725 | #!/usr/bin/env python
#
# Copyright 2007,2008,2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
from scipy import fftpack
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
from scipy import log10
from gnuradio.eng_option import eng_option
class plot_psd_base:
def __init__(self, datatype, filename, options):
self.hfile = open(filename, "r")
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.psdfftsize = options.psd_size
self.specfftsize = options.spec_size
self.dospec = options.enable_spec # if we want to plot the spectrogram
self.datatype = getattr(scipy, datatype) #scipy.complex64
self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 12), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file = figtext(0.10, 0.95, ("File: %s" % filename),
weight="heavy", size=self.text_size)
self.text_file_pos = figtext(0.10, 0.92, "File Position: ",
weight="heavy", size=self.text_size)
self.text_block = figtext(0.35, 0.92, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.915, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = scipy.array(self.sp_iq.get_xlim())
self.manager = get_current_fig_manager()
connect('draw_event', self.zoom)
connect('key_press_event', self.click)
show()
def get_data(self):
self.position = self.hfile.tell()/self.sizeof_data
self.text_file_pos.set_text("File Position: %d" % self.position)
try:
self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
return False
else:
# retesting length here as newer version of scipy does not throw a MemoryError, just
# returns a zero-length array
if(len(self.iq) > 0):
tstep = 1.0 / self.sample_rate
#self.time = scipy.array([tstep*(self.position + i) for i in xrange(len(self.iq))])
self.time = scipy.array([tstep*(i) for i in xrange(len(self.iq))])
self.iq_psd, self.freq = self.dopsd(self.iq)
return True
else:
print "End of File"
return False
def dopsd(self, iq):
''' Need to do this here and plot later so we can do the fftshift '''
overlap = self.psdfftsize/4
winfunc = scipy.blackman
psd,freq = mlab.psd(iq, self.psdfftsize, self.sample_rate,
window = lambda d: d*winfunc(self.psdfftsize),
noverlap = overlap)
psd = 10.0*log10(abs(psd))
return (psd, freq)
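    # Illustrative sketch (not part of the original class): the same PSD
    # computation on a synthetic complex tone, using the older scipy and
    # matplotlib.mlab namespaces this script already relies on.
    #
    #   import scipy
    #   from matplotlib import mlab
    #   from scipy import log10
    #   fs, nfft = 1e3, 1024
    #   t = scipy.arange(8192) / fs
    #   iq = scipy.exp(2j * scipy.pi * 100 * t)        # 100 Hz complex tone
    #   psd, freq = mlab.psd(iq, nfft, fs,
    #                        window=lambda d: d * scipy.blackman(nfft),
    #                        noverlap=nfft / 4)
    #   psd_db = 10.0 * log10(abs(psd))                # peak near +100 Hz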
def make_plots(self):
# if specified on the command-line, set file pointer
self.hfile.seek(self.sizeof_data*self.start, 1)
iqdims = [[0.075, 0.2, 0.4, 0.6], [0.075, 0.55, 0.4, 0.3]]
psddims = [[0.575, 0.2, 0.4, 0.6], [0.575, 0.55, 0.4, 0.3]]
specdims = [0.2, 0.125, 0.6, 0.3]
# Subplot for real and imaginary parts of signal
self.sp_iq = self.fig.add_subplot(2,2,1, position=iqdims[self.dospec])
self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
# Subplot for PSD plot
self.sp_psd = self.fig.add_subplot(2,2,2, position=psddims[self.dospec])
self.sp_psd.set_title(("PSD"), fontsize=self.title_font_size, fontweight="bold")
self.sp_psd.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.sp_psd.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold")
r = self.get_data()
self.plot_iq = self.sp_iq.plot([], 'bo-') # make plot for reals
self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags
self.draw_time(self.time, self.iq) # draw the plot
self.plot_psd = self.sp_psd.plot([], 'b') # make plot for PSD
self.draw_psd(self.freq, self.iq_psd) # draw the plot
if self.dospec:
# Subplot for spectrogram plot
self.sp_spec = self.fig.add_subplot(2,2,3, position=specdims)
self.sp_spec.set_title(("Spectrogram"), fontsize=self.title_font_size, fontweight="bold")
self.sp_spec.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_spec.set_ylabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.draw_spec(self.time, self.iq)
draw()
def draw_time(self, t, iq):
reals = iq.real
imags = iq.imag
self.plot_iq[0].set_data([t, reals])
self.plot_iq[1].set_data([t, imags])
self.sp_iq.set_xlim(t.min(), t.max())
self.sp_iq.set_ylim([1.5*min([reals.min(), imags.min()]),
1.5*max([reals.max(), imags.max()])])
def draw_psd(self, f, p):
self.plot_psd[0].set_data([f, p])
self.sp_psd.set_ylim([p.min()-10, p.max()+10])
self.sp_psd.set_xlim([f.min(), f.max()])
def draw_spec(self, t, s):
overlap = self.specfftsize/4
winfunc = scipy.blackman
self.sp_spec.clear()
self.sp_spec.specgram(s, self.specfftsize, self.sample_rate,
window = lambda d: d*winfunc(self.specfftsize),
noverlap = overlap, xextent=[t.min(), t.max()])
def update_plots(self):
self.draw_time(self.time, self.iq)
self.draw_psd(self.freq, self.iq_psd)
if self.dospec:
self.draw_spec(self.time, self.iq)
self.xlim = scipy.array(self.sp_iq.get_xlim()) # so zoom doesn't get called
draw()
def zoom(self, event):
newxlim = scipy.array(self.sp_iq.get_xlim())
curxlim = scipy.array(self.xlim)
if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
#xmin = max(0, int(ceil(self.sample_rate*(newxlim[0] - self.position))))
#xmax = min(int(ceil(self.sample_rate*(newxlim[1] - self.position))), len(self.iq))
xmin = max(0, int(ceil(self.sample_rate*(newxlim[0]))))
xmax = min(int(ceil(self.sample_rate*(newxlim[1]))), len(self.iq))
iq = scipy.array(self.iq[xmin : xmax])
time = scipy.array(self.time[xmin : xmax])
iq_psd, freq = self.dopsd(iq)
self.draw_psd(freq, iq_psd)
self.xlim = scipy.array(self.sp_iq.get_xlim())
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
r = self.get_data()
if(r):
self.update_plots()
def step_backward(self):
# Step back in file position
if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
else:
self.hfile.seek(-self.hfile.tell(),1)
r = self.get_data()
if(r):
self.update_plots()
@staticmethod
def setup_options():
usage="%prog: [options] input_filename"
description = "Takes a GNU Radio binary file (with specified data type using --data-type) and displays the I&Q data versus time as well as the power spectral density (PSD) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples. Finally, the size of the FFT to use for the PSD and spectrogram plots can be set independently with --psd-size and --spec-size, respectively. The spectrogram plot does not display by default and is turned on with -S or --enable-spec."
parser = OptionParser(option_class=eng_option, conflict_handler="resolve",
usage=usage, description=description)
parser.add_option("-d", "--data-type", type="string", default="complex64",
help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]")
parser.add_option("-B", "--block", type="int", default=8192,
help="Specify the block size [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify where to start in the file [default=%default]")
parser.add_option("-R", "--sample-rate", type="eng_float", default=1.0,
help="Set the sampler rate of the data [default=%default]")
parser.add_option("", "--psd-size", type="int", default=1024,
help="Set the size of the PSD FFT [default=%default]")
parser.add_option("", "--spec-size", type="int", default=256,
help="Set the size of the spectrogram FFT [default=%default]")
parser.add_option("-S", "--enable-spec", action="store_true", default=False,
help="Turn on plotting the spectrogram [default=%default]")
return parser
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
def main():
parser = plot_psd_base.setup_options()
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
dc = plot_psd_base(options.data_type, filename, options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
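# Hedged usage note (not part of the original script): a typical command-line
# invocation would resemble
#
#   plot_psd_base.py -d complex64 -B 8192 -R 1e6 -S capture.dat
#
# where capture.dat is a hypothetical GNU Radio binary capture recorded at
# 1 MS/s; -S additionally enables the spectrogram panel.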
| gpl-3.0 |
anhaidgroup/py_stringsimjoin | py_stringsimjoin/filter/suffix_filter.py | 1 | 25496 | from math import ceil, floor
from joblib import delayed, Parallel
import pandas as pd
import pyprind
from py_stringsimjoin.filter.filter import Filter
from py_stringsimjoin.filter.filter_utils import get_overlap_threshold, \
get_prefix_length
from py_stringsimjoin.utils.generic_helper import convert_dataframe_to_array, \
find_output_attribute_indices, get_attrs_to_project, \
get_num_processes_to_launch, get_output_header_from_tables, \
get_output_row_from_tables, remove_redundant_attrs, split_table
from py_stringsimjoin.utils.missing_value_handler import \
get_pairs_with_missing_value
from py_stringsimjoin.utils.token_ordering import gen_token_ordering_for_lists,\
gen_token_ordering_for_tables, order_using_token_ordering
from py_stringsimjoin.utils.validation import validate_attr, \
validate_attr_type, validate_key_attr, validate_input_table, \
validate_threshold, validate_tokenizer_for_sim_measure, \
validate_output_attrs, validate_sim_measure_type
class SuffixFilter(Filter):
"""Finds candidate matching pairs of strings using suffix filtering
technique.
For similarity measures such as cosine, Dice, Jaccard and overlap, the
filter finds candidate string pairs that may have similarity score greater
than or equal to the input threshold, as specified in "threshold". For
distance measures such as edit distance, the filter finds candidate string
pairs that may have distance score less than or equal to the threshold.
To know more about suffix filtering, refer to the paper
`Efficient Similarity Joins for Near Duplicate Detection
(Chuan Xiao, Wei Wang, Xuemin Lin and Jeffrey Xu Yu), WWW 08
<http://www.cse.unsw.edu.au/~weiw/files/WWW08-PPJoin-Final.pdf>`_.
Args:
tokenizer (Tokenizer): tokenizer to be used.
sim_measure_type (string): similarity measure type. Supported types are
'JACCARD', 'COSINE', 'DICE', 'OVERLAP' and 'EDIT_DISTANCE'.
threshold (float): threshold to be used by the filter.
allow_empty (boolean): A flag to indicate whether pairs in which both
strings are tokenized into an empty set of tokens should
survive the filter (defaults to True). This flag is not valid for
measures such as 'OVERLAP' and 'EDIT_DISTANCE'.
allow_missing (boolean): A flag to indicate whether pairs containing
missing value should survive the filter (defaults to False).
Attributes:
tokenizer (Tokenizer): An attribute to store the tokenizer.
sim_measure_type (string): An attribute to store the similarity measure
type.
threshold (float): An attribute to store the threshold value.
allow_empty (boolean): An attribute to store the value of the flag
allow_empty.
allow_missing (boolean): An attribute to store the value of the flag
allow_missing.
"""
def __init__(self, tokenizer, sim_measure_type, threshold,
allow_empty=True, allow_missing=False):
# check if the sim_measure_type is valid
validate_sim_measure_type(sim_measure_type)
sim_measure_type = sim_measure_type.upper()
# check if the input tokenizer is valid
validate_tokenizer_for_sim_measure(tokenizer, sim_measure_type)
# check if the threshold is valid
validate_threshold(threshold, sim_measure_type)
self.tokenizer = tokenizer
self.sim_measure_type = sim_measure_type
self.threshold = threshold
self.allow_empty = allow_empty
self.max_depth = 2
super(self.__class__, self).__init__(allow_missing)
def filter_pair(self, lstring, rstring):
"""Checks if the input strings get dropped by the suffix filter.
Args:
lstring,rstring (string): input strings
Returns:
A flag indicating whether the string pair is dropped (boolean).
"""
# If one of the inputs is missing, then check the allow_missing flag.
# If it is set to True, then pass the pair. Else drop the pair.
if pd.isnull(lstring) or pd.isnull(rstring):
return (not self.allow_missing)
# tokenize input strings
ltokens = self.tokenizer.tokenize(lstring)
rtokens = self.tokenizer.tokenize(rstring)
l_num_tokens = len(ltokens)
r_num_tokens = len(rtokens)
if l_num_tokens == 0 and r_num_tokens == 0:
if self.sim_measure_type == 'OVERLAP':
return True
elif self.sim_measure_type == 'EDIT_DISTANCE':
return False
else:
return (not self.allow_empty)
# order the tokens using the token ordering
token_ordering = gen_token_ordering_for_lists([ltokens, rtokens])
ordered_ltokens = order_using_token_ordering(ltokens, token_ordering)
ordered_rtokens = order_using_token_ordering(rtokens, token_ordering)
# compute prefix length
l_prefix_length = get_prefix_length(l_num_tokens,
self.sim_measure_type,
self.threshold,
self.tokenizer)
r_prefix_length = get_prefix_length(r_num_tokens,
self.sim_measure_type,
self.threshold,
self.tokenizer)
if l_prefix_length <= 0 or r_prefix_length <= 0:
return True
return self._filter_suffix(ordered_ltokens[l_prefix_length:],
ordered_rtokens[r_prefix_length:],
l_prefix_length,
r_prefix_length,
l_num_tokens, r_num_tokens)
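    # Usage sketch (illustrative, not from the original source): filter_pair
    # on two short strings. The tokenizer is assumed to come from
    # py_stringmatching, which this package builds on.
    #
    #   from py_stringmatching import WhitespaceTokenizer
    #   sf = SuffixFilter(WhitespaceTokenizer(return_set=True),
    #                     'JACCARD', 0.8)
    #   sf.filter_pair('data integration tools', 'data integration system')
    #   # True means the pair is dropped, False means it survives the filter.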
def _filter_suffix(self, l_suffix, r_suffix,
l_prefix_num_tokens, r_prefix_num_tokens,
l_num_tokens, r_num_tokens):
# compute the overlap needed between the tokens to satisfy the
# threshold.
overlap_threshold = get_overlap_threshold(l_num_tokens, r_num_tokens,
self.sim_measure_type,
self.threshold,
self.tokenizer)
if (l_prefix_num_tokens >= overlap_threshold and
r_prefix_num_tokens >= overlap_threshold):
return False
# compute the maximum allowed hamming distance between the
# suffix tokens in order to satisfy the threshold.
hamming_dist_max = (l_num_tokens + r_num_tokens - 2 * overlap_threshold)
# compute lowerbound on the actual hamming distance between the suffix
# tokens.
hamming_dist = self._est_hamming_dist_lower_bound(
l_suffix, r_suffix,
l_num_tokens - l_prefix_num_tokens,
r_num_tokens - r_prefix_num_tokens,
hamming_dist_max, 1)
# if the lowerbound on the actual hamming distance is already above the
# maximum allowed hamming distance, then we can filter the pair.
if hamming_dist <= hamming_dist_max:
return False
return True
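    # Worked numeric sketch (illustrative): with |x| = |y| = 10 tokens and a
    # required overlap of, say, 9 tokens, the suffixes may disagree in at most
    # 10 + 10 - 2 * 9 = 2 positions; if the recursive lower bound on the
    # hamming distance already exceeds 2, the pair can be safely dropped.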
def filter_tables(self, ltable, rtable,
l_key_attr, r_key_attr,
l_filter_attr, r_filter_attr,
l_out_attrs=None, r_out_attrs=None,
l_out_prefix='l_', r_out_prefix='r_',
n_jobs=1, show_progress=True):
"""Finds candidate matching pairs of strings from the input tables using
suffix filtering technique.
Args:
ltable (DataFrame): left input table.
rtable (DataFrame): right input table.
l_key_attr (string): key attribute in left table.
r_key_attr (string): key attribute in right table.
l_filter_attr (string): attribute in left table on which the filter
should be applied.
r_filter_attr (string): attribute in right table on which the filter
should be applied.
l_out_attrs (list): list of attribute names from the left table to
be included in the output table (defaults to None).
r_out_attrs (list): list of attribute names from the right table to
be included in the output table (defaults to None).
l_out_prefix (string): prefix to be used for the attribute names
coming from the left table, in the output table
(defaults to 'l\_').
r_out_prefix (string): prefix to be used for the attribute names
coming from the right table, in the output table
(defaults to 'r\_').
n_jobs (int): number of parallel jobs to use for the computation
(defaults to 1). If -1 is given, all CPUs are used. If 1 is
given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used (where n_cpus is the total
number of CPUs in the machine). Thus for n_jobs = -2, all CPUs
but one are used. If (n_cpus + 1 + n_jobs) becomes less than 1,
then no parallel computing code will be used (i.e., equivalent
to the default).
show_progress (boolean): flag to indicate whether task progress
should be displayed to the user (defaults to True).
Returns:
An output table containing tuple pairs that survive the filter
(DataFrame).
"""
# check if the input tables are dataframes
validate_input_table(ltable, 'left table')
validate_input_table(rtable, 'right table')
# check if the key attributes and filter attributes exist
validate_attr(l_key_attr, ltable.columns,
'key attribute', 'left table')
validate_attr(r_key_attr, rtable.columns,
'key attribute', 'right table')
validate_attr(l_filter_attr, ltable.columns,
'filter attribute', 'left table')
validate_attr(r_filter_attr, rtable.columns,
'filter attribute', 'right table')
# check if the filter attributes are not of numeric type
validate_attr_type(l_filter_attr, ltable[l_filter_attr].dtype,
'filter attribute', 'left table')
validate_attr_type(r_filter_attr, rtable[r_filter_attr].dtype,
'filter attribute', 'right table')
# check if the output attributes exist
validate_output_attrs(l_out_attrs, ltable.columns,
r_out_attrs, rtable.columns)
# check if the key attributes are unique and do not contain
# missing values
validate_key_attr(l_key_attr, ltable, 'left table')
validate_key_attr(r_key_attr, rtable, 'right table')
# remove redundant attrs from output attrs.
l_out_attrs = remove_redundant_attrs(l_out_attrs, l_key_attr)
r_out_attrs = remove_redundant_attrs(r_out_attrs, r_key_attr)
# get attributes to project.
l_proj_attrs = get_attrs_to_project(l_out_attrs,
l_key_attr, l_filter_attr)
r_proj_attrs = get_attrs_to_project(r_out_attrs,
r_key_attr, r_filter_attr)
# Do a projection on the input dataframes to keep only the required
# attributes. Then, remove rows with missing value in filter attribute
# from the input dataframes. Then, convert the resulting dataframes
# into ndarray.
ltable_array = convert_dataframe_to_array(ltable, l_proj_attrs,
l_filter_attr)
rtable_array = convert_dataframe_to_array(rtable, r_proj_attrs,
r_filter_attr)
# computes the actual number of jobs to launch.
n_jobs = min(get_num_processes_to_launch(n_jobs), len(rtable_array))
if n_jobs <= 1:
# if n_jobs is 1, do not use any parallel code.
output_table = _filter_tables_split(
ltable_array, rtable_array,
l_proj_attrs, r_proj_attrs,
l_key_attr, r_key_attr,
l_filter_attr, r_filter_attr,
self,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
show_progress)
else:
# if n_jobs is above 1, split the right table into n_jobs splits and
# filter each right table split with the whole of left table in a
# separate process.
r_splits = split_table(rtable_array, n_jobs)
results = Parallel(n_jobs=n_jobs)(delayed(_filter_tables_split)(
ltable_array, r_splits[job_index],
l_proj_attrs, r_proj_attrs,
l_key_attr, r_key_attr,
l_filter_attr, r_filter_attr,
self,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
(show_progress and (job_index==n_jobs-1)))
for job_index in range(n_jobs))
output_table = pd.concat(results)
# If allow_missing flag is set, then compute all pairs with missing
# value in at least one of the filter attributes and then add it to the
# output obtained from applying the filter.
if self.allow_missing:
missing_pairs = get_pairs_with_missing_value(
ltable, rtable,
l_key_attr, r_key_attr,
l_filter_attr, r_filter_attr,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
False, show_progress)
output_table = pd.concat([output_table, missing_pairs])
# add an id column named '_id' to the output table.
output_table.insert(0, '_id', range(0, len(output_table)))
return output_table
def _est_hamming_dist_lower_bound(self, l_suffix, r_suffix,
l_suffix_num_tokens,
r_suffix_num_tokens,
hamming_dist_max, depth):
abs_diff = abs(l_suffix_num_tokens - r_suffix_num_tokens)
if (depth > self.max_depth or
l_suffix_num_tokens == 0 or
r_suffix_num_tokens == 0):
return abs_diff
if l_suffix_num_tokens == 1 and r_suffix_num_tokens==1:
return int(not l_suffix[0] == r_suffix[0])
r_mid = int(floor(r_suffix_num_tokens / 2))
r_mid_token = r_suffix[r_mid]
o = (hamming_dist_max - abs_diff) / 2
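        # o bounds how far the partition position in l_suffix may deviate from
        # r_mid while the remaining hamming distance budget can still be met
        # (per the suffix filtering technique of Xiao et al.; interpretation assumed).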
if l_suffix_num_tokens < r_suffix_num_tokens:
o_l = 1
o_r = 0
else:
o_l = 0
o_r = 1
# partition the tokens using the probe token.
(r_l, r_r, flag, diff) = self._partition(
r_suffix, r_mid_token, r_mid, r_mid)
(l_l, l_r, flag, diff) = self._partition(l_suffix, r_mid_token,
max(0, int(r_mid - o - abs_diff * o_l)),
min(l_suffix_num_tokens - 1,
int(r_mid + o + abs_diff * o_r)))
if flag == 0:
return hamming_dist_max + 1
r_l_num_tokens = len(r_l)
r_r_num_tokens = len(r_r)
l_l_num_tokens = len(l_l)
l_r_num_tokens = len(l_r)
hamming_dist = (abs(l_l_num_tokens - r_l_num_tokens) +
abs(l_r_num_tokens - r_r_num_tokens) + diff)
if hamming_dist > hamming_dist_max:
return hamming_dist
else:
# compute lower bound on hamming distance in the left partition.
hamming_dist_l = self._est_hamming_dist_lower_bound(
l_l, r_l, l_l_num_tokens, r_l_num_tokens,
hamming_dist_max -
abs(l_r_num_tokens - r_r_num_tokens) - diff,
depth + 1)
hamming_dist = (hamming_dist_l +
abs(l_r_num_tokens - r_r_num_tokens) + diff)
if hamming_dist <= hamming_dist_max:
# compute lower bound on hamming distance in the right
# partition.
hamming_dist_r = self._est_hamming_dist_lower_bound(
l_r, r_r, l_r_num_tokens, r_r_num_tokens,
hamming_dist_max - hamming_dist_l - diff,
depth + 1)
return hamming_dist_l + hamming_dist_r + diff
else:
return hamming_dist
def _partition(self, tokens, probe_token, left, right):
"""Partition the tokens using the probe token."""
right = min(right, len(tokens) - 1)
if right < left:
return [], [], 0, 1
if tokens[left] > probe_token:
return [], [], 0, 1
if tokens[right] < probe_token:
return [], [], 0, 1
pos = self._binary_search(tokens, probe_token, left, right)
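        # split around the probe position; diff is 0 when the probe token was
        # found (and is consumed), 1 otherwise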
tokens_left = tokens[0:pos]
if tokens[pos] == probe_token:
tokens_right = tokens[pos+1:len(tokens)]
diff = 0
else:
tokens_right = tokens[pos:len(tokens)]
diff = 1
return tokens_left, tokens_right, 1, diff
def _binary_search(self, tokens, probe_token, left, right):
"""Peform binary search to find the position of the probe token."""
if left == right:
return left
mid = int(floor((left + right) / 2))
mid_token = tokens[mid]
if mid_token == probe_token:
return mid
elif mid_token < probe_token:
return self._binary_search(tokens, probe_token, mid+1, right)
else:
return self._binary_search(tokens, probe_token, left, mid)
def _filter_tables_split(ltable, rtable,
l_columns, r_columns,
l_key_attr, r_key_attr,
l_filter_attr, r_filter_attr,
suffix_filter,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix, show_progress):
# find column indices of key attr, filter attr and output attrs in ltable
l_key_attr_index = l_columns.index(l_key_attr)
l_filter_attr_index = l_columns.index(l_filter_attr)
l_out_attrs_indices = find_output_attribute_indices(l_columns, l_out_attrs)
# find column indices of key attr, filter attr and output attrs in rtable
r_key_attr_index = r_columns.index(r_key_attr)
r_filter_attr_index = r_columns.index(r_filter_attr)
r_out_attrs_indices = find_output_attribute_indices(r_columns, r_out_attrs)
# generate token ordering using tokens in l_filter_attr and r_filter_attr
token_ordering = gen_token_ordering_for_tables(
[ltable, rtable],
[l_filter_attr_index, r_filter_attr_index],
suffix_filter.tokenizer,
suffix_filter.sim_measure_type)
# ignore allow_empty flag for OVERLAP and EDIT_DISTANCE measures.
handle_empty = (suffix_filter.allow_empty and
suffix_filter.sim_measure_type not in ['OVERLAP', 'EDIT_DISTANCE'])
output_rows = []
has_output_attributes = (l_out_attrs is not None or
r_out_attrs is not None)
if show_progress:
prog_bar = pyprind.ProgBar(len(ltable))
for l_row in ltable:
l_string = l_row[l_filter_attr_index]
ltokens = suffix_filter.tokenizer.tokenize(l_string)
ordered_ltokens = order_using_token_ordering(ltokens, token_ordering)
l_num_tokens = len(ordered_ltokens)
l_prefix_length = get_prefix_length(l_num_tokens,
suffix_filter.sim_measure_type,
suffix_filter.threshold,
suffix_filter.tokenizer)
l_suffix = ordered_ltokens[l_prefix_length:]
for r_row in rtable:
r_string = r_row[r_filter_attr_index]
rtokens = suffix_filter.tokenizer.tokenize(r_string)
ordered_rtokens = order_using_token_ordering(rtokens,
token_ordering)
r_num_tokens = len(ordered_rtokens)
# If allow_empty flag is set, then add the pair to the output.
if handle_empty and l_num_tokens == 0 and r_num_tokens == 0:
if has_output_attributes:
output_row = get_output_row_from_tables(
l_row, r_row,
l_key_attr_index, r_key_attr_index,
l_out_attrs_indices,
r_out_attrs_indices)
else:
output_row = [l_row[l_key_attr_index],
r_row[r_key_attr_index]]
output_rows.append(output_row)
continue
r_prefix_length = get_prefix_length(r_num_tokens,
suffix_filter.sim_measure_type,
suffix_filter.threshold,
suffix_filter.tokenizer)
if l_prefix_length <= 0 or r_prefix_length <= 0:
continue
if not suffix_filter._filter_suffix(l_suffix,
ordered_rtokens[r_prefix_length:],
l_prefix_length, r_prefix_length,
l_num_tokens, r_num_tokens):
if has_output_attributes:
output_row = get_output_row_from_tables(
l_row, r_row,
l_key_attr_index, r_key_attr_index,
l_out_attrs_indices,
r_out_attrs_indices)
else:
output_row = [l_row[l_key_attr_index],
r_row[r_key_attr_index]]
output_rows.append(output_row)
if show_progress:
prog_bar.update()
output_header = get_output_header_from_tables(
l_key_attr, r_key_attr,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix)
# generate a dataframe from the list of output rows
output_table = pd.DataFrame(output_rows, columns=output_header)
return output_table
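# ---------------------------------------------------------------------------
# Minimal usage sketch of the suffix filter. The tokenizer import and the
# SuffixFilter constructor arguments below are assumptions; adapt them to your
# installation of py_stringmatching / py_stringsimjoin.
#
#   import pandas as pd
#   from py_stringmatching import QgramTokenizer
#
#   ltable = pd.DataFrame({'id': [1, 2], 'name': ['apple inc', 'orange ltd']})
#   rtable = pd.DataFrame({'id': [1, 2], 'name': ['apple incorporated', 'banana co']})
#
#   tok = QgramTokenizer(qval=3, return_set=True)
#   sf = SuffixFilter(tok, 'JACCARD', 0.8)          # assumed signature
#   candidates = sf.filter_tables(ltable, rtable, 'id', 'id', 'name', 'name')
# ---------------------------------------------------------------------------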
| bsd-3-clause |
arbuz001/sms-tools | lectures/05-Sinusoidal-model/plots-code/spectral-sine-synthesis.py | 24 | 1316 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
from scipy.fftpack import fft, ifft, fftshift
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import sineModel as SM
import utilFunctions as UF
Ns = 256
hNs = Ns//2      # integer half size of the FFT, so it can be used for slicing
yw = np.zeros(Ns)
fs = 44100
freqs = np.array([1000.0, 4000.0, 8000.0])
amps = np.array([.6, .4, .6])
phases = ([0.5, 1.2, 2.3])
yploc = Ns*freqs/fs
ypmag = 20*np.log10(amps/2.0)
ypphase = phases
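# synthesize the complex spectrum of the three sinusoids: genSpecSines is assumed
# to place a Blackman-Harris main lobe at each peak and to fill in the negative
# frequencies so the spectrum is conjugate-symmetric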
Y = UF.genSpecSines(freqs, ypmag, ypphase, Ns, fs)
mY = 20*np.log10(abs(Y[:hNs]))
pY = np.unwrap(np.angle(Y[:hNs]))
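# back to the time domain: inverse FFT, undo the FFT shift, and rescale by the
# Blackman-Harris window sum (the synthesized lobes are assumed to be normalized by it)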
y = np.real(fftshift(ifft(Y))) * sum(blackmanharris(Ns))
plt.figure(1, figsize=(9, 5))
plt.subplot(3,1,1)
plt.plot(fs*np.arange(Ns/2)/Ns, mY, 'r', lw=1.5)
plt.axis([0, fs/2.0,-100,0])
plt.title("mY, freqs (Hz) = 1000, 4000, 8000; amps = .6, .4, .6")
plt.subplot(3,1,2)
pY[pY==0]= np.nan
plt.plot(fs*np.arange(Ns/2)/Ns, pY, 'c', lw=1.5)
plt.axis([0, fs/2.0,-.01,3.0])
plt.title("pY, phases (radians) = .5, 1.2, 2.3")
plt.subplot(3,1,3)
plt.plot(np.arange(-hNs, hNs), y, 'b', lw=1.5)
plt.axis([-hNs, hNs,min(y),max(y)])
plt.title("y")
plt.tight_layout()
plt.savefig('spectral-sine-synthesis.png')
plt.show()
| agpl-3.0 |
AlexanderFabisch/scikit-learn | examples/classification/plot_lda.py | 142 | 2419 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)
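        # clf1 shrinks the covariance estimate (Ledoit-Wolf, shrinkage='auto');
        # clf2 uses the plain empirical covariance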
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="Linear Discriminant Analysis with shrinkage", color='navy')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="Linear Discriminant Analysis", color='gold')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
| bsd-3-clause |
mhue/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
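# keep only the first two feature columns ("features 1 and 2" in the description above)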
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
brunston/stellarpyl | text.py | 3 | 12212 | # -*- coding: utf-8 -*-
"""
stellarPYL - python stellar spectra processing software
Copyright (c) 2016 Brunston Poon
@file: text
This program comes with absolutely no warranty.
"""
import time
def welcome():
print("""
Welcome to stellarPYL, Copyright (C) 2015 Brunston Poon, type 'licence' for info
Type 'quit' or 'exit' to leave the program. Use ctrl-c to force-interrupt.
TO VIEW HELP, WHICH WILL DESCRIBE A TYPICAL WORKFLOW SCENARIO, TYPE 'help'.
TO VIEW A LIST OF AVAILABLE FUNCTIONS & COMMANDS, TYPE 'commands'.
TO LEARN MORE ABOUT THIS PROGRAM, TYPE 'about'.
Help and information is also available online at http://st.bpbp.xyz/
or by viewing README.md
""")
return None
def firstrun():
print("""
SINCE this is your first time running the program, please take the time to read
the about, help, and commands documentation to familiarize yourself with this
program. For your convenience, press enter to view the commands now.
""")
return None
def about():
print("""
This is stellar spectra reduction and analysis command-line software written
using the Python 3.4 version of the Anaconda Scientific Python distribution. It
is work done for an internship at the Unversity of Hawaii in conjunction with
the St. Paul's School Engineering Honors program.
It aims to provide a simplified workflow for analyzing uncompressed TIFF stellar
spectra images obtained from a DSLR through a diffraction grating. The goals of
the project are: to automatically crop the image; to perform background
subtraction; to create an intensity plot of the spectrum (accounting for non-
orthogonal spectra); and to account for the use of a DSLR sensor by using either
a relative response function or an absolute response function to normalize the
intensity plot.
It is written by Brunston Poon.
""")
return None
def help():
print("""
AS AN ALTERNATIVE TO THE BELOW, make sure you set a default threshold using
'settings_threshold', and then simply type 'auto' to have the program do
the majority of the work.
You will be presented with a list of commands.
For a brand new image, run 'crop' first. Drag your file into the same directory
and enter the filename including the file extension. This program will accept
TIFF files, either in .tif or .tiff extension format. It will then ask you for a
threshold.
The threshold is used throughout the program to determine what data is relevant
and what parts of the image can be discarded without damaging the value of the
data. It needs to be an integer value between 0 and 765 as the threshold is
measured as the sum of the R, G, and B bin values in a pixel, therefore,
each RGB value can be an integer from 0-255; total value can be from 0-765. If
you do not have a value you are already using for all of your images, you can
type 'pixel_d' at the command prompt to run a function that plots the
distribution of binned pixel values in your image.
A typical threshold may be in the range from 100-130.
The program will run the cropping algorithm and ask for a filename to give to
the new file.
The next command you should run is 'intensity_saa'. It will take an image file
and a threshold and automatically perform linear regression to find the y=mx+b
line on which the spectral trace lies. It will then step one pixel at a time
along the spectral trace and add up all intensity values occuring along that
line.
The program will graph this intensity plot, which can be saved using the tools
already provided by matplotlib.
""")
return None
def commands():
print("""
---IMAGE PROCESSING---
- 'autoProcess' (short 'auto') -
autoProcess will take care of cropping and doing intensity plotting for you.
just provide a filename. In order to use this feature you must first set
a default threshold to use by using the 'settings_threshold' command.
- 'pixel_d' (short 'pd') -
takes an image and shows the pixel distribution of the image over the intensity
of the pixels.
- 'crop' -
takes an image and crops it based on your selected threshold.
- 'image_regression' (short 'imgreg') -
takes an image and finds the line which goes through the spectrum in that image.
- 'intensity_n' (short 'n') -
takes an image of a spectrum and converts it into an intensity plot using the
naive method of adding.
- 'intensity_saa' (short 'saa') -
takes an image of a spectrum and converts it into an intensity plot using
spatial anti-aliasing at a sub-sampling rate of one tenth of one pixel.
- 'show_threshold' -
see exactly what could be removed (assuming no crop stop has been set) using the
threshold that is currently set.
- 'show_regression' -
shows regressed line overlayed on the original (cropped) image.
- 'show_walks' -
shows walking lines overlayed on the original (cropped) image.
- 'dev_cgrowth' -
plots curve of growth
---PROGRAM---
- 'about' -
displays information about this program
- 'functions' -
where you are now
- 'help' -
brings up sample workflow
- 'settings_cropoverride' -
sets manual overrides for automatic cropping on the top, bottom, and sides
of an image. The default value is -1 (which is equivalent to no override)
for all values.
- 'settings_default' -
returns ALL settings back to default:
defaultThreshold = -1
autoIntensity = saa
manual overrides all to -1
step = 1
verbose = yes
showthresh = yes
- 'settings_intensity' -
sets default intensity processing method for the autoProcess feature.
The default setting is saa (for spatial anti-aliasing).
- 'settings_margin' -
sets margin for cropping. default is 5 pixels.
- 'settings_showthreshold' -
showThreshold takes a while to run. Set to 'no' for a faster autoProcess
run time. Default is 'yes'
- 'settings_step'
sets default step value along the spectral trace (and thus resolution of
resulting intensity plot). default is 1 pixel-equivalence.
- 'settings_threshold' -
sets default threshold. Set to -1 if you would like the program to always ask.
The default setting is -1 (always asks).
- 'settings_verbose' -
sets verboseness. 'yes' to include debug statements, 'no' is default.
- 'view_settings' -
view your current settings
""")
return None
def rehash():
print("""
Type 'quit' or 'exit' to leave this program. Alternately, you may use
ctrl-c to force-interrupt at any time. Type 'help' for sample workflow,
'commands' for a list of functions and commands, and 'about' for more info.
""")
return None
def viewSettings(config):
print("Current settings:")
print("default threshold: ", config['CONTROL']['defaultthreshold'])
print("autoIntensity: ", config['CONTROL']['autointensity'])
print("manual override top crop:", config['CONTROL']['manualtop'])
print("manual override bottom crop:", config['CONTROL']['manualbot'])
print("manual override left crop:", config['CONTROL']['manualleft'])
print("manual override right crop:", config['CONTROL']['manualright'])
print("step:", config['CONTROL']['r'])
print("verbose:", config['CONTROL']['verbose'])
print("showthresh:", config['CONTROL']['showthresh'])
print("margin:",config['CONTROL']['margin'])
return None
def licence():
print("""
This program comes with absolutely no warranty. This is libre/gratis software,
and you are welcome to redistribute it under certain conditions.
""")
time.sleep(3)
print("""
stellarPYL is copyright (c) 2015 Brunston Poon
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
THE REST OF THE LICENCE TEXT IS VIEWABLE IN LICENCE.txt
stellarPYL is copyright (c) 2015 Brunston Poon.
""")
return None
def jellyfish():
print("""
(hello!)
.'
'
_ -- ~~~ -- _ _______
.-~ ~-.{__-----. :
/ \ | |
: O O : | |
/\ /------' j
{ {/~-. \__/ .-~\~~~~~~~~~
\/ / |~:- .___. -.~\ \ \.
/ /\ \ | | { { \ \ } } \ \.
{ { \ \ | \ \ \ \ / } }
\ \ /\ \ \ \ /\ \ { {
} } { { \ \ \ \/ / \ \ \ \.
/ / } } \ \ }{ { \ \ } }
/ / { { \ \{\ \ } { {
/ / } } } \ \ / / \ \ \.
`-' { { `-'\ \`-'/ / `-'
`-' `-' `-'
unknown artist
""")
return "jellyfish"
| gpl-3.0 |
mnnit-workspace/Logical-Rhythm-17 | Class-2/dt_using_iris.py | 1 | 1111 | from sklearn import datasets,tree,cross_validation
import graphviz as gz
import numpy as np
import pydotplus
from sklearn.externals.six import StringIO
iris = datasets.load_iris()
'''
To analyze data
print(iris.target_names)
print(iris.feature_names)
print(iris.data[0])
print(iris.target[0])
for i in range(len(iris.target)):
print("Example %d: Label %s, Feature %s" % (i+1,iris.target[i],iris.data[i]))
'''
test_idx = [0,50,100]
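# hold out one sample of each iris class (indices 0, 50, 100) for testing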
train_target = np.delete(iris.target,test_idx)
train_data = np.delete(iris.data,test_idx,axis=0)
test_data = iris.data[test_idx]
test_target = iris.target[test_idx]
clf = tree.DecisionTreeClassifier()
clf = clf.fit(train_data, train_target)
print(test_target)
print(clf.predict(test_data))
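# export the fitted tree to Graphviz DOT format and render it as a PDF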
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data,
feature_names=iris.feature_names,
class_names=iris.target_names,
filled=True, rounded=True,
special_characters=True)
gr = pydotplus.graph_from_dot_data(dot_data.getvalue())
gr.write_pdf("out.pdf")
| mit |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
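    # brute-force reference KDE used to validate KDTree.kernel_density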
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
yonglehou/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If n_components is not set then all components are stored and the sum
        of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| bsd-3-clause |
equialgo/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 67 | 7474 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increased logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible distortion ``eps``
drastically reduces the minimal number of dimensions ``n_components`` for a
given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, the 8x8 gray-level pixel data for 500 handwritten
  digit pictures are randomly projected to spaces of various larger
  dimensionalities ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |