repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---
knights-lab/SHOGUN | shogun/scripts/old/shogun_bt2_functional.py | 1 | 5408 | #!/usr/bin/env python
"""
Copyright 2015-2020 Knights Lab, Regents of the University of Minnesota.
This software is released under the GNU Affero General Public License (AGPL) v3.0 License.
"""
import click
from collections import Counter, defaultdict
import os
import sys
import pandas as pd
from cytoolz import valmap, valfilter
import csv
from ninja_utils.utils import find_between
from ninja_utils.utils import verify_make_dir
from dojo.taxonomy import NCBITree
from dojo.taxonomy.maps import IMGMap
from shogun.wrappers import bowtie2_align
from shogun.parsers import yield_alignments_from_sam_inf
def build_img_ncbi_map(align_gen, lca, img_map):
"""
Given a generator for SAM file, return a dictionary with QNAME as the key
and (IMG IDs: list, LCA NCBI ID: int) as the value.
:param align_gen:
:param lca:
:param img_map:
:return:
"""
lca_map = defaultdict(lambda: [set(), None])
for qname, rname in align_gen:
img_id = int(rname.split('_')[0])
if qname in lca_map:
current_rname = lca_map[qname][1]
new_taxon = img_map(img_id)
if current_rname and new_taxon:
if current_rname != new_taxon:
lca_map[qname][1] = lca(current_rname, new_taxon)
else:
lca_map[qname][1] = img_map(img_id)
lca_map[qname][0].add(rname)
return lca_map
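# Illustrative sketch of the mapping build_img_ncbi_map returns (the read
# name and taxon ID below are hypothetical, not taken from real data): each
# QNAME maps to the set of reference names it aligned to plus the lowest
# common ancestor of their NCBI taxa, e.g.
#   {'read_1': [{'637000001_gene42', '637000002_gene07'}, 1224]}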
@click.command()
@click.option('-i', '--input', type=click.Path(), default=os.getcwd(),
help='Directory containing the input FASTA files with ".fna" extensions (default=cwd)')
@click.option('-o', '--output', type=click.Path(), default=os.path.join(os.getcwd(), 'shogun_bt2_lca_out'),
help='Output directory for the results')
@click.option('-b', '--bt2_indx', help='Path to the bowtie2 index')
@click.option('-x', '--extract_ncbi_tid', default='ncbi_tid|,|',
help='Characters that sandwich the NCBI TID in the reference FASTA (default="ncbi_tid|,|")')
@click.option('-p', '--threads', type=click.INT, default=1, help='The number of threads to use (default=1)')
def shogun_functional(input, output, bt2_indx, extract_ncbi_tid, threads):
verify_make_dir(output)
basenames = [os.path.basename(filename)[:-4] for filename in os.listdir(input) if filename.endswith('.fna')]
# Create a SAM file for each input FASTA file
for basename in basenames:
fna_inf = os.path.join(input, basename + '.fna')
sam_outf = os.path.join(output, basename + '.sam')
if os.path.isfile(sam_outf):
print("Found the samfile \"%s\". Skipping the alignment phase for this file." % sam_outf)
else:
print(bowtie2_align(fna_inf, sam_outf, bt2_indx, num_threads=threads))
img_map = IMGMap()
for basename in basenames:
sam_inf = os.path.join(output, basename + '.sam')
step_outf = 'test'
if os.path.isfile(step_outf):
print("Found the \"%s.kegg.csv\". Skipping the LCA phase for this file." % step_outf)
else:
lca_map = build_img_ncbi_map(yield_alignments_from_sam_inf(sam_inf), )
sam_files = [os.path.join(args.input, filename) for filename in os.listdir(args.input) if filename.endswith('.sam')]
img_map = IMGMap()
ncbi_tree = NCBITree()
lca = LCA(ncbi_tree, args.depth)
with open(args.output, 'w') if args.output else sys.stdout as outf:
csv_outf = csv.writer(outf, quoting=csv.QUOTE_ALL, lineterminator='\n')
csv_outf.writerow(['sample_id', 'sequence_id', 'ncbi_tid', 'img_id'])
for file in sam_files:
with open(file) as inf:
lca_map = build_lca_map(yield_alignments_from_sam_inf(inf), lca, img_map)
for key in lca_map:
img_ids, ncbi_tid = lca_map[key]
csv_outf.writerow([os.path.basename(file).split('.')[0], key, ncbi_tid, ','.join(img_ids)])
if run_lca:
tree = NCBITree()
rank_name = list(tree.lineage_ranks.keys())[depth - 1]
if not rank_name:
raise ValueError('Depth must be between 0 and 7, it was %d' % depth)
begin, end = extract_ncbi_tid.split(',')
counts = []
for basename in basenames:
sam_file = os.path.join(output, basename + '.sam')
lca_map = {}
for qname, rname in yield_alignments_from_sam_inf(sam_file):
ncbi_tid = int(find_between(rname, begin, end))
if qname in lca_map:
current_ncbi_tid = lca_map[qname]
if current_ncbi_tid:
if current_ncbi_tid != ncbi_tid:
lca_map[qname] = tree.lowest_common_ancestor(ncbi_tid, current_ncbi_tid)
else:
lca_map[qname] = ncbi_tid
if annotate_lineage:
lca_map = valmap(lambda x: tree.green_genes_lineage(x, depth=depth), lca_map)
taxon_counts = Counter(filter(None, lca_map.values()))
else:
lca_map = valfilter(lambda x: tree.get_rank_from_taxon_id(x) == rank_name, lca_map)
taxon_counts = Counter(filter(None, lca_map.values()))
counts.append(taxon_counts)
df = pd.DataFrame(counts, index=basenames)
df.T.to_csv(os.path.join(output, 'taxon_counts.csv'))
if __name__ == '__main__':
shogun_functional()
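# Hypothetical example invocation (all paths are placeholders, not from the
# original repository):
#   python shogun_bt2_functional.py -i ./fasta_dir -o ./shogun_bt2_lca_out \
#       -b /path/to/bt2_index -x 'ncbi_tid|,|' -p 4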
| agpl-3.0 |
Jimmy-Morzaria/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared Euclidean correlation model, the nugget is mathematically
equivalent to a normalized variance; that is,
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# Now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
suryakant54321/basicDataPrep | extractArray.py | 1 | 3617 | #-----------------------------------------------------
# Ref YATSM :https://github.com/ceholden/yatsm
# ----------------------------------------------------
# Script Name: extractArray.py
# Author: Suryakant Sawant ([email protected])
# Date: 20 August 2015
# This script helps to extract data from cache of YATSM :)
# to increase Time Series analysis capabilities
#
# 1. Read all cache files (.npz)
# 2. Extract usable information
# band number, date, reflectance
# 3. Write csv file with pixel details
# convention used for output files
# <pixel ID>_<Shape>.csv
#
# simple ! is it !! ?? :)
# 4. Now you can use output and process it with
# Python / R / Spreadsheets / Matlab
#
# Note: If you are successful in running TSTools / YATSM
# in QGIS(i.e.
# https://github.com/ceholden/TSTools or
# https://github.com/ceholden/yatsm
#
# I am trying to make it more simple.
# Some sections are hard coded (marked with #--*--)
#-----------------------------------------------------
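# Illustrative sketch (not part of the original script) of how one YATSM cache
# file is laid out, based on the prints inside extract() below: 'image_0'
# holds per-acquisition metadata and 'data_0' holds a bands x observations
# array, so transposing gives one row per date. The default file name is the
# hypothetical example mentioned in a comment near the end of this script.
def peek_cache(npz_path="x1121y1090_i0n388b8.npz"):
    import numpy as np
    cache = np.load(npz_path)
    meta, data = cache['image_0'], cache['data_0']
    return meta, data.T  # observations x bands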
#!/usr/bin/env python2
import os, re, gdal
import numpy as np
import shutil, time
# To Do : plot values on the fly :)
#from pylab import *
#import matplotlib
#
# Your project path
os.chdir("/media/opengeo-usb/surya2") #--*--
# cache path
cacheDir = "suryaWork/work/8_thermalData/4_tsData/cache" #--*--
csvPath = "suryaWork/work/8_thermalData/6_TimeSeriesOutput" #--*--
def extract(cacheDir, csvPath):
#print (cacheDir)
allFiles = os.listdir(cacheDir)
count = 0
for zipAray in allFiles:
#print(zipAray)
if zipAray.endswith(".npz"):
filePath = ("%s/%s")%(cacheDir, zipAray)
a = np.load(filePath)
print("==========================\n \
Processing new %s File ")%(zipAray)
print ("Keys = %s")%(a.keys())
# ['image_0', 'data_0']
metData = a['image_0']
print("datatype of key image_0 = %s")%(metData.dtype)
"""
[('filename', 'O'), ('path', 'O'), ('id', 'O'),
('date', 'O'), ('ordinal', '<u4'), ('doy', '<u2')]
"""
print ("Sample Key image_0 = %s")%(metData[0])
"""
('subset_LT51450451990031.tif', '<folder path>/subset_LT51450451990031.tif',
'LT51450451990031', datetime.datetime(1990, 1, 31, 0, 0), 726498L, 31)
"""
data = a['data_0']
print ("Number of arrays/Bands in data = %s")%(len(data))
print ("Number of observations for a band = %s")%(len(data[0]))
print ("shape of data array = ");print(data.shape)
# Fun Starts here :)
tData = np.transpose(data)
print ("New shape of data array = ");print(tData.shape)
#***
fNames = []
dateTime = []
ordDay = []
for i in range(0,len(metData)):
fNames.append(metData[i][2]) #--*--
dateTime.append(metData[i][3]) #--*--
ordDay.append(metData[i][4]) #--*--
fNames = np.asarray(fNames, dtype=np.str)
dateTime = np.asarray(dateTime, dtype=np.str)
ordDay = np.asarray(ordDay, dtype=np.str)
numRows = len(fNames)
fNames = fNames.reshape(numRows,1)
dateTime = dateTime.reshape(numRows,1)
ordDay = ordDay.reshape(numRows,1)
#***
# concatenate all arrays. This (#*** to #***) can be done in a much better and faster way :)
allMet = np.concatenate((fNames, dateTime, ordDay, tData), axis=1)
print("New array shape = ")
print(allMet.shape)
jj = allMet.shape
csvFile = re.split('_',zipAray) # x1121y1090_i0n388b8.npz
csvFile = ("%s_r%sc%s.csv")%(csvFile[0],jj[0], jj[1])
csvFile = ("%s/%s")%(csvPath, csvFile)
np.savetxt(csvFile, allMet, delimiter=',', fmt='%19s', header='name, date, ordate, b, g, r, nir, swir1, swir2, cfmask, thermal')
#print ("Array %s available")%(zipAray)
#--
extract(cacheDir, csvPath)
#
| gpl-2.0 |
andersbll/deeppy-website | _downloads/convnet_mnist.py | 5 | 3024 | #!/usr/bin/env python
"""
Convnets for image classification (1)
=====================================
"""
import numpy as np
import deeppy as dp
import matplotlib
import matplotlib.pyplot as plt
# Fetch MNIST data
dataset = dp.dataset.MNIST()
x_train, y_train, x_test, y_test = dataset.data(dp_dtypes=True)
# Bring images to BCHW format
x_train = x_train[:, np.newaxis, :, :]
x_test = x_test[:, np.newaxis, :, :]
# Normalize pixel intensities
scaler = dp.StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
# Prepare network inputs
batch_size = 128
train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
test_input = dp.Input(x_test)
# Setup network
def pool_layer():
return dp.Pool(
win_shape=(2, 2),
strides=(2, 2),
border_mode='valid',
method='max',
)
def conv_layer(n_filters):
return dp.Convolution(
n_filters=n_filters,
filter_shape=(5, 5),
border_mode='valid',
weights=dp.Parameter(dp.AutoFiller(gain=1.39),
weight_decay=0.0005),
)
weight_gain_fc = 1.84
weight_decay_fc = 0.002
net = dp.NeuralNetwork(
layers=[
conv_layer(32),
dp.Activation('relu'),
pool_layer(),
conv_layer(64),
dp.Activation('relu'),
pool_layer(),
dp.Flatten(),
dp.DropoutFullyConnected(
n_out=512,
dropout=0.5,
weights=dp.Parameter(dp.AutoFiller(weight_gain_fc),
weight_decay=weight_decay_fc),
),
dp.Activation('relu'),
dp.FullyConnected(
n_out=dataset.n_classes,
weights=dp.Parameter(dp.AutoFiller(weight_gain_fc)),
),
],
loss=dp.SoftmaxCrossEntropy(),
)
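# The stack above is: 5x5 conv (32 filters) -> ReLU -> 2x2 max-pool ->
# 5x5 conv (64 filters) -> ReLU -> 2x2 max-pool -> flatten ->
# dropout fully connected (512 units) -> ReLU -> fully connected
# (n_classes units), trained with a softmax cross-entropy loss.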
# Train network
n_epochs = [50, 15, 15]
learn_rate = 0.05
momentum = 0.88
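# The loop below trains in three stages with a decaying learning rate:
# 50 epochs at 0.05, then 15 epochs at 0.005, then 15 epochs at 0.0005,
# all with momentum 0.88.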
for i, epochs in enumerate(n_epochs):
trainer = dp.StochasticGradientDescent(
max_epochs=epochs, learn_rule=dp.Momentum(learn_rate=learn_rate/10**i,
momentum=momentum),
)
trainer.train(net, train_input)
# Plot misclassified images.
def plot_img(img, title):
plt.figure()
plt.imshow(img, cmap='gray', interpolation='nearest')
plt.title(title)
plt.axis('off')
plt.tight_layout()
errors = net.predict(x_test) != y_test
n_errors = np.sum(errors)
x_errors = np.squeeze(x_test[errors])
plot_img(dp.misc.img_tile(dp.misc.img_stretch(x_errors), aspect_ratio=0.6),
'All %i misclassified digits' % n_errors)
# Plot convolutional filters.
filters = [l.weights.array for l in net.layers
if isinstance(l, dp.Convolution)]
fig = plt.figure()
gs = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[1, 3])
for i, f in enumerate(filters):
ax = plt.subplot(gs[i])
ax.imshow(dp.misc.conv_filter_tile(f), cmap='gray',
interpolation='nearest')
ax.set_title('Conv layer %i' % i)
ax.axis('off')
plt.tight_layout()
| mit |
femtotrader/pyfolio | pyfolio/plotting.py | 1 | 66297 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from collections import OrderedDict
import pandas as pd
import numpy as np
import scipy as sp
import datetime
import pytz
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import matplotlib.patches as patches
from matplotlib import figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from . import utils
from . import timeseries
from . import pos
from . import _seaborn as sns
from . import txn
from . import capacity
from .utils import (APPROX_BDAYS_PER_MONTH,
MM_DISPLAY_UNIT)
from functools import wraps
import empyrical
def customize(func):
"""
Decorator to set plotting context and axes style during function call.
"""
@wraps(func)
def call_w_context(*args, **kwargs):
set_context = kwargs.pop('set_context', True)
if set_context:
with plotting_context(), axes_style():
return func(*args, **kwargs)
else:
return func(*args, **kwargs)
return call_w_context
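# Illustrative usage sketch of the decorator above (plot_something is a
# hypothetical function, not part of pyfolio):
#
#   @customize
#   def plot_something(returns, ax=None, **kwargs):
#       ...
#
#   plot_something(returns)                     # styled via plotting_context()
#   plot_something(returns, set_context=False)  # keep the caller's style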
def plotting_context(context='notebook', font_scale=1.5, rc=None):
"""
Create pyfolio default plotting style context.
Under the hood, calls and returns seaborn.plotting_context() with
some custom settings. Usually you would use in a with-context.
Parameters
----------
context : str, optional
Name of seaborn context.
font_scale : float, optional
Scale font by factor font_scale.
rc : dict, optional
Config flags.
By default, {'lines.linewidth': 1.5}
is being used and will be added to any
rc passed in, unless explicitly overriden.
Returns
-------
seaborn plotting context
Example
-------
>>> with pyfolio.plotting.plotting_context(font_scale=2):
>>> pyfolio.create_full_tear_sheet(..., set_context=False)
See also
--------
For more information, see seaborn.plotting_context().
"""
if rc is None:
rc = {}
rc_default = {'lines.linewidth': 1.5}
# Add defaults if they do not exist
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.plotting_context(context=context, font_scale=font_scale, rc=rc)
def axes_style(style='darkgrid', rc=None):
"""
Create pyfolio default axes style context.
Under the hood, calls and returns seaborn.axes_style() with
some custom settings. Usually you would use in a with-context.
Parameters
----------
style : str, optional
Name of seaborn style.
rc : dict, optional
Config flags.
Returns
-------
seaborn plotting context
Example
-------
>>> with pyfolio.plotting.axes_style(style='whitegrid'):
>>> pyfolio.create_full_tear_sheet(..., set_context=False)
See also
--------
For more information, see seaborn.plotting_context().
"""
if rc is None:
rc = {}
rc_default = {}
# Add defaults if they do not exist
for name, val in rc_default.items():
rc.setdefault(name, val)
return sns.axes_style(style=style, rc=rc)
def plot_rolling_fama_french(
returns,
factor_returns=None,
rolling_window=APPROX_BDAYS_PER_MONTH * 6,
legend_loc='best',
ax=None, **kwargs):
"""
Plots rolling Fama-French single factor betas.
Specifically, plots SMB, HML, and UMD vs. date with a legend.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.DataFrame, optional
data set containing the Fama-French risk factors. See
utils.load_portfolio_risk_factors.
rolling_window : int, optional
The days window over which to compute the beta.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
ax.set_title(
"Rolling Fama-French single factor betas (%.0f-month)" % (
rolling_window / APPROX_BDAYS_PER_MONTH
)
)
ax.set_ylabel('Beta')
rolling_beta = timeseries.rolling_fama_french(
returns,
factor_returns=factor_returns,
rolling_window=rolling_window)
rolling_beta.plot(alpha=0.7, ax=ax, **kwargs)
ax.axhline(0.0, color='black')
ax.legend(['Small-Caps (SMB)',
'High-Growth (HML)',
'Momentum (UMD)'],
loc=legend_loc)
y_axis_formatter = FuncFormatter(utils.two_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
ax.axhline(0.0, color='black')
ax.set_xlabel('')
ax.set_ylim((-1.0, 1.0))
return ax
def plot_monthly_returns_heatmap(returns, ax=None, **kwargs):
"""
Plots a heatmap of returns by month.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
monthly_ret_table = empyrical.aggregate_returns(returns, 'monthly')
monthly_ret_table = monthly_ret_table.unstack().round(3)
sns.heatmap(
monthly_ret_table.fillna(0) *
100.0,
annot=True,
annot_kws={
"size": 9},
alpha=1.0,
center=0.0,
cbar=False,
cmap=matplotlib.cm.RdYlGn,
ax=ax, **kwargs)
ax.set_ylabel('Year')
ax.set_xlabel('Month')
ax.set_title("Monthly returns (%)")
return ax
def plot_annual_returns(returns, ax=None, **kwargs):
"""
Plots a bar graph of returns by year.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
x_axis_formatter = FuncFormatter(utils.percentage)
ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))
ax.tick_params(axis='x', which='major', labelsize=10)
ann_ret_df = pd.DataFrame(
empyrical.aggregate_returns(
returns,
'yearly'))
ax.axvline(
100 *
ann_ret_df.values.mean(),
color='steelblue',
linestyle='--',
lw=4,
alpha=0.7)
(100 * ann_ret_df.sort_index(ascending=False)
).plot(ax=ax, kind='barh', alpha=0.70, **kwargs)
ax.axvline(0.0, color='black', linestyle='-', lw=3)
ax.set_ylabel('Year')
ax.set_xlabel('Returns')
ax.set_title("Annual returns")
ax.legend(['mean'])
return ax
def plot_monthly_returns_dist(returns, ax=None, **kwargs):
"""
Plots a distribution of monthly returns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
x_axis_formatter = FuncFormatter(utils.percentage)
ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))
ax.tick_params(axis='x', which='major', labelsize=10)
monthly_ret_table = empyrical.aggregate_returns(returns, 'monthly')
ax.hist(
100 * monthly_ret_table,
color='orangered',
alpha=0.80,
bins=20,
**kwargs)
ax.axvline(
100 * monthly_ret_table.mean(),
color='gold',
linestyle='--',
lw=4,
alpha=1.0)
ax.axvline(0.0, color='black', linestyle='-', lw=3, alpha=0.75)
ax.legend(['mean'])
ax.set_ylabel('Number of months')
ax.set_xlabel('Returns')
ax.set_title("Distribution of monthly returns")
return ax
def plot_holdings(returns, positions, legend_loc='best', ax=None, **kwargs):
"""
Plots total amount of stocks with an active position, either short
or long. Displays daily total, daily average per month, and
all-time daily average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame, optional
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
positions = positions.copy().drop('cash', axis='columns')
df_holdings = positions.replace(0, np.nan).count(axis=1)
df_holdings_by_month = df_holdings.resample('1M').mean()
df_holdings.plot(color='steelblue', alpha=0.6, lw=0.5, ax=ax, **kwargs)
df_holdings_by_month.plot(
color='orangered',
alpha=0.5,
lw=2,
ax=ax,
**kwargs)
ax.axhline(
df_holdings.values.mean(),
color='steelblue',
ls='--',
lw=3,
alpha=1.0)
ax.set_xlim((returns.index[0], returns.index[-1]))
leg = ax.legend(['Daily holdings',
'Average daily holdings, by month',
'Average daily holdings, overall'],
loc=legend_loc, frameon=True,
framealpha=0.8, prop={'size': 12})
leg.get_frame().set_edgecolor('black')
ax.set_title('Total holdings')
ax.set_ylabel('Holdings')
ax.set_xlabel('')
return ax
def plot_long_short_holdings(returns, positions,
legend_loc='upper left', ax=None, **kwargs):
"""
Plots total amount of stocks with an active position, breaking out
short and long into transparent filled regions.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame, optional
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
positions = positions.drop('cash', axis='columns')
positions = positions.replace(0, np.nan)
df_longs = positions[positions > 0].count(axis=1)
df_shorts = positions[positions < 0].count(axis=1)
l_color = matplotlib.colors.colorConverter.to_rgba('#28B121', alpha=.7)
s_color = matplotlib.colors.colorConverter.to_rgba('#D9292E', alpha=.7)
lf = ax.fill_between(df_longs.index, 0, df_longs.values,
color='#28B121', alpha=0.25, lw=2.0,
edgecolor=l_color)
sf = ax.fill_between(df_shorts.index, 0, df_shorts.values,
color='#D9292E', alpha=0.25, lw=2.0,
edgecolor=s_color)
bf = patches.Rectangle([0, 0], 1, 1, color='#B08C65')
leg = ax.legend([lf, sf, bf],
['Long (max: %s, min: %s)' % (df_longs.max(),
df_longs.min()),
'Short (max: %s, min: %s)' % (df_shorts.max(),
df_shorts.min()),
'Overlap'], loc=legend_loc, frameon=True,
framealpha=0.8, prop={'size': 12})
leg.get_frame().set_edgecolor('black')
ax.set_xlim((returns.index[0], returns.index[-1]))
ax.set_title('Long and short holdings')
ax.set_ylabel('Holdings')
ax.set_xlabel('')
return ax
def plot_drawdown_periods(returns, top=10, ax=None, **kwargs):
"""
Plots cumulative returns highlighting top drawdown periods.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
Amount of top drawdowns periods to plot (default 10).
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.two_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
df_cum_rets = empyrical.cum_returns(returns, starting_value=1.0)
df_drawdowns = timeseries.gen_drawdown_table(returns, top=top)
df_cum_rets.plot(ax=ax, **kwargs)
lim = ax.get_ylim()
colors = sns.cubehelix_palette(len(df_drawdowns))[::-1]
for i, (peak, recovery) in df_drawdowns[
['Peak date', 'Recovery date']].iterrows():
if pd.isnull(recovery):
recovery = returns.index[-1]
ax.fill_between((peak, recovery),
lim[0],
lim[1],
alpha=.4,
color=colors[i])
ax.set_ylim(lim)
ax.set_title('Top %i drawdown periods' % top)
ax.set_ylabel('Cumulative returns')
ax.legend(['Portfolio'], loc='upper left')
ax.set_xlabel('')
return ax
def plot_drawdown_underwater(returns, ax=None, **kwargs):
"""
Plots how far underwater returns are over time, i.e. the current
drawdown vs. date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.percentage)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
df_cum_rets = empyrical.cum_returns(returns, starting_value=1.0)
running_max = np.maximum.accumulate(df_cum_rets)
underwater = -100 * ((running_max - df_cum_rets) / running_max)
(underwater).plot(ax=ax, kind='area', color='coral', alpha=0.7, **kwargs)
ax.set_ylabel('Drawdown')
ax.set_title('Underwater plot')
ax.set_xlabel('')
return ax
def plot_perf_stats(returns, factor_returns, ax=None):
"""
Create box plot of some performance metrics of the strategy.
The width of the box whiskers is determined by a bootstrap.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.DataFrame, optional
data set containing the Fama-French risk factors. See
utils.load_portfolio_risk_factors.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
bootstrap_values = timeseries.perf_stats_bootstrap(returns,
factor_returns,
return_stats=False)
bootstrap_values = bootstrap_values.drop('Kurtosis', axis='columns')
sns.boxplot(data=bootstrap_values, orient='h', ax=ax)
return ax
STAT_FUNCS_PCT = [
'Annual return',
'Cumulative returns',
'Annual volatility',
'Max drawdown',
'Daily value at risk',
'Daily turnover'
]
def show_perf_stats(returns, factor_returns, positions=None,
transactions=None, live_start_date=None,
bootstrap=False):
"""
Prints some performance metrics of the strategy.
- Shows amount of time the strategy has been run in backtest and
out-of-sample (in live trading).
- Shows Omega ratio, max drawdown, Calmar ratio, annual return,
stability, Sharpe ratio, annual volatility, alpha, and beta.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series
Daily noncumulative returns of the benchmark.
- This is in the same style as returns.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
live_start_date : datetime, optional
The point in time when the strategy began live trading, after
its backtest period.
bootstrap : boolean (optional)
Whether to perform bootstrap analysis for the performance
metrics.
- For more information, see timeseries.perf_stats_bootstrap
"""
if bootstrap:
perf_func = timeseries.perf_stats_bootstrap
else:
perf_func = timeseries.perf_stats
perf_stats_all = perf_func(
returns,
factor_returns=factor_returns,
positions=positions,
transactions=transactions)
if live_start_date is not None:
live_start_date = utils.get_utc_timestamp(live_start_date)
returns_is = returns[returns.index < live_start_date]
returns_oos = returns[returns.index >= live_start_date]
positions_is = None
positions_oos = None
transactions_is = None
transactions_oos = None
if positions is not None:
positions_is = positions[positions.index < live_start_date]
positions_oos = positions[positions.index >= live_start_date]
if transactions is not None:
transactions_is = transactions[(transactions.index <
live_start_date)]
transactions_oos = transactions[(transactions.index >
live_start_date)]
perf_stats_is = perf_func(
returns_is,
factor_returns=factor_returns,
positions=positions_is,
transactions=transactions_is)
perf_stats_oos = perf_func(
returns_oos,
factor_returns=factor_returns,
positions=positions_oos,
transactions=transactions_oos)
print('In-sample months: ' +
str(int(len(returns_is) / APPROX_BDAYS_PER_MONTH)))
print('Out-of-sample months: ' +
str(int(len(returns_oos) / APPROX_BDAYS_PER_MONTH)))
perf_stats = pd.concat(OrderedDict([
('In-sample', perf_stats_is),
('Out-of-sample', perf_stats_oos),
('All', perf_stats_all),
]), axis=1)
else:
print('Backtest months: ' +
str(int(len(returns) / APPROX_BDAYS_PER_MONTH)))
perf_stats = pd.DataFrame(perf_stats_all, columns=['Backtest'])
for column in perf_stats.columns:
for stat, value in perf_stats[column].iteritems():
if stat in STAT_FUNCS_PCT:
perf_stats.loc[stat, column] = str(np.round(value * 100,
1)) + '%'
utils.print_table(perf_stats, fmt='{0:.2f}')
def plot_returns(returns,
live_start_date=None,
ax=None):
"""
Plots raw returns over time.
Backtest returns are in green, and out-of-sample (live trading)
returns are in red.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
live_start_date : datetime, optional
The date when the strategy began live trading, after
its backtest period. This date should be normalized.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
ax.set_label('')
ax.set_ylabel('Returns')
if live_start_date is not None:
live_start_date = utils.get_utc_timestamp(live_start_date)
is_returns = returns.loc[returns.index < live_start_date]
oos_returns = returns.loc[returns.index >= live_start_date]
is_returns.plot(ax=ax, color='g')
oos_returns.plot(ax=ax, color='r')
else:
returns.plot(ax=ax, color='g')
return ax
def plot_rolling_returns(returns,
factor_returns=None,
live_start_date=None,
logy=False,
cone_std=None,
legend_loc='best',
volatility_match=False,
cone_function=timeseries.forecast_cone_bootstrap,
ax=None, **kwargs):
"""
Plots cumulative rolling returns versus some benchmark.
Backtest returns are in green, and out-of-sample (live trading)
returns are in red.
Additionally, a non-parametric cone plot may be added to the
out-of-sample returns region.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of a risk factor.
- This is in the same style as returns.
live_start_date : datetime, optional
The date when the strategy began live trading, after
its backtest period. This date should be normalized.
logy : bool, optional
Whether to log-scale the y-axis.
cone_std : float, or tuple, optional
If float, The standard deviation to use for the cone plots.
If tuple, Tuple of standard deviation values to use for the cone plots
- See timeseries.forecast_cone_bounds for more details.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
volatility_match : bool, optional
Whether to normalize the volatility of the returns to those of the
benchmark returns. This helps compare strategies with different
volatilities. Requires passing of benchmark_rets.
cone_function : function, optional
Function to use when generating forecast probability cone.
The function signature must follow the form:
def cone(in_sample_returns (pd.Series),
days_to_project_forward (int),
cone_std= (float, or tuple),
starting_value= (int, or float))
See timeseries.forecast_cone_bootstrap for an example.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
ax.set_xlabel('')
ax.set_ylabel('Cumulative returns')
ax.set_yscale('log' if logy else 'linear')
if volatility_match and factor_returns is None:
raise ValueError('volatility_match requires passing of'
'factor_returns.')
elif volatility_match and factor_returns is not None:
bmark_vol = factor_returns.loc[returns.index].std()
returns = (returns / returns.std()) * bmark_vol
cum_rets = empyrical.cum_returns(returns, 1.0)
y_axis_formatter = FuncFormatter(utils.two_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
if factor_returns is not None:
cum_factor_returns = empyrical.cum_returns(
factor_returns[cum_rets.index], 1.0)
cum_factor_returns.plot(lw=2, color='gray',
label=factor_returns.name, alpha=0.60,
ax=ax, **kwargs)
if live_start_date is not None:
live_start_date = utils.get_utc_timestamp(live_start_date)
is_cum_returns = cum_rets.loc[cum_rets.index < live_start_date]
oos_cum_returns = cum_rets.loc[cum_rets.index >= live_start_date]
else:
is_cum_returns = cum_rets
oos_cum_returns = pd.Series([])
is_cum_returns.plot(lw=3, color='forestgreen', alpha=0.6,
label='Backtest', ax=ax, **kwargs)
if len(oos_cum_returns) > 0:
oos_cum_returns.plot(lw=4, color='red', alpha=0.6,
label='Live', ax=ax, **kwargs)
if cone_std is not None:
if isinstance(cone_std, (float, int)):
cone_std = [cone_std]
is_returns = returns.loc[returns.index < live_start_date]
cone_bounds = cone_function(
is_returns,
len(oos_cum_returns),
cone_std=cone_std,
starting_value=is_cum_returns[-1])
cone_bounds = cone_bounds.set_index(oos_cum_returns.index)
for std in cone_std:
ax.fill_between(cone_bounds.index,
cone_bounds[float(std)],
cone_bounds[float(-std)],
color='steelblue', alpha=0.5)
if legend_loc is not None:
ax.legend(loc=legend_loc)
ax.axhline(1.0, linestyle='--', color='black', lw=2)
return ax
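# A minimal sketch (hypothetical, not part of pyfolio) of a cone_function that
# satisfies the signature described in plot_rolling_returns above: it must
# return a DataFrame with num_days rows and one column per +/- standard
# deviation in cone_std, which plot_rolling_returns then reindexes onto the
# out-of-sample dates. timeseries.forecast_cone_bootstrap is the real default;
# this naive variant, whose bounds widen with the square root of elapsed days,
# only illustrates the expected output shape.
def _naive_sqrt_time_cone(is_returns, num_days, cone_std=(1.0,),
                          starting_value=1):
    std = is_returns.std()
    steps = np.sqrt(np.arange(1, num_days + 1))
    bounds = {}
    for s in cone_std:
        bounds[float(s)] = starting_value * (1 + s * std * steps)
        bounds[float(-s)] = starting_value * (1 - s * std * steps)
    return pd.DataFrame(bounds)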
def plot_rolling_beta(returns, factor_returns, legend_loc='best',
ax=None, **kwargs):
"""
Plots the rolling 6-month and 12-month beta versus date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark.
- This is in the same style as returns.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.two_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
ax.set_title("Rolling portfolio beta to " + str(factor_returns.name))
ax.set_ylabel('Beta')
rb_1 = timeseries.rolling_beta(
returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6)
rb_1.plot(color='steelblue', lw=3, alpha=0.6, ax=ax, **kwargs)
rb_2 = timeseries.rolling_beta(
returns, factor_returns, rolling_window=APPROX_BDAYS_PER_MONTH * 12)
rb_2.plot(color='grey', lw=3, alpha=0.4, ax=ax, **kwargs)
ax.axhline(rb_1.mean(), color='steelblue', linestyle='--', lw=3)
ax.axhline(0.0, color='black', linestyle='-', lw=2)
ax.set_xlabel('')
ax.legend(['6-mo',
'12-mo'],
loc=legend_loc)
ax.set_ylim((-1.0, 1.0))
return ax
def plot_rolling_volatility(returns, factor_returns=None,
rolling_window=APPROX_BDAYS_PER_MONTH * 6,
legend_loc='best', ax=None, **kwargs):
"""
Plots the rolling volatility versus date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series, optional
Daily noncumulative returns of the benchmark.
rolling_window : int, optional
The days window over which to compute the volatility.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.two_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
rolling_vol_ts = timeseries.rolling_volatility(
returns, rolling_window)
rolling_vol_ts.plot(alpha=.7, lw=3, color='orangered', ax=ax,
**kwargs)
if factor_returns is not None:
rolling_vol_ts_factor = timeseries.rolling_volatility(
factor_returns, rolling_window)
rolling_vol_ts_factor.plot(alpha=.7, lw=3, color='grey', ax=ax,
**kwargs)
ax.set_title('Rolling Volatility (6-month)')
ax.axhline(
rolling_vol_ts.mean(),
color='orangered',
linestyle='--',
lw=3)
ax.axhline(0.0, color='black', linestyle='-', lw=2)
ax.set_ylabel('Volatility')
ax.set_xlabel('')
if factor_returns is None:
ax.legend(['Volatility', 'Average Volatility'],
loc=legend_loc)
else:
ax.legend(['Volatility', 'Benchmark Volatility', 'Average Volatility'],
loc=legend_loc)
return ax
def plot_rolling_sharpe(returns, rolling_window=APPROX_BDAYS_PER_MONTH * 6,
legend_loc='best', ax=None, **kwargs):
"""
Plots the rolling Sharpe ratio versus date.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
rolling_window : int, optional
The days window over which to compute the sharpe ratio.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.two_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
rolling_sharpe_ts = timeseries.rolling_sharpe(
returns, rolling_window)
rolling_sharpe_ts.plot(alpha=.7, lw=3, color='orangered', ax=ax,
**kwargs)
ax.set_title('Rolling Sharpe ratio (6-month)')
ax.axhline(
rolling_sharpe_ts.mean(),
color='steelblue',
linestyle='--',
lw=3)
ax.axhline(0.0, color='black', linestyle='-', lw=3)
ax.set_ylabel('Sharpe ratio')
ax.set_xlabel('')
ax.legend(['Sharpe', 'Average'],
loc=legend_loc)
return ax
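# A minimal sketch (an assumption about the usual definition, not necessarily
# what timeseries.rolling_sharpe computes) of an annualized rolling Sharpe
# ratio over a window of daily returns:
def _rolling_sharpe_sketch(returns, rolling_window):
    return (returns.rolling(rolling_window).mean()
            / returns.rolling(rolling_window).std()
            * np.sqrt(252))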
def plot_gross_leverage(returns, positions, ax=None, **kwargs):
"""
Plots gross leverage versus date.
Gross leverage is the sum of long and short exposure per share
divided by net asset value.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
gl = timeseries.gross_lev(positions)
gl.plot(alpha=0.8, lw=0.5, color='g', legend=False, ax=ax, **kwargs)
ax.axhline(gl.mean(), color='g', linestyle='--', lw=3, alpha=1.0)
ax.set_title('Gross leverage')
ax.set_ylabel('Gross leverage')
ax.set_xlabel('')
return ax
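# A minimal sketch (an assumption, not necessarily timeseries.gross_lev
# exactly) of the gross leverage definition given above: the sum of absolute
# long and short exposure divided by net asset value, computed from a
# positions frame with one column per asset plus a 'cash' column.
def _gross_leverage_sketch(positions):
    exposure = positions.drop('cash', axis='columns').abs().sum(axis='columns')
    return exposure / positions.sum(axis='columns')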
def plot_exposures(returns, positions, ax=None, **kwargs):
"""
Plots an area chart of the long, short, and net exposure.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values, including a 'cash' column.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
pos_no_cash = positions.drop('cash', axis=1)
l_exp = pos_no_cash[pos_no_cash > 0].sum(axis=1) / positions.sum(axis=1)
s_exp = pos_no_cash[pos_no_cash < 0].sum(axis=1) / positions.sum(axis=1)
net_exp = pos_no_cash.sum(axis=1) / positions.sum(axis=1)
ax.fill_between(l_exp.index,
0,
l_exp.values,
label='Long', color='green')
ax.fill_between(s_exp.index,
0,
s_exp.values,
label='Short', color='red')
ax.plot(net_exp.index, net_exp.values,
label='Net', color='black', linestyle='dotted')
ax.set_xlim((returns.index[0], returns.index[-1]))
ax.set_title("Exposure")
ax.set_ylabel('Exposure')
ax.legend(loc='lower left', frameon=True)
ax.set_xlabel('')
return ax
def show_and_plot_top_positions(returns, positions_alloc,
show_and_plot=2, hide_positions=False,
legend_loc='real_best', ax=None,
**kwargs):
"""
Prints and/or plots the exposures of the top 10 held positions of
all time.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions_alloc : pd.DataFrame
Portfolio allocation of positions. See pos.get_percent_alloc.
show_and_plot : int, optional
By default, this is 2, and both prints and plots.
If this is 0, it will only plot; if 1, it will only print.
hide_positions : bool, optional
If True, will not output any symbol names.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
By default, the legend will display below the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes, conditional
The axes that were plotted on.
"""
positions_alloc = positions_alloc.copy()
positions_alloc.columns = positions_alloc.columns.map(utils.format_asset)
df_top_long, df_top_short, df_top_abs = pos.get_top_long_short_abs(
positions_alloc)
if show_and_plot == 1 or show_and_plot == 2:
utils.print_table(pd.DataFrame(df_top_long * 100, columns=['max']),
fmt='{0:.2f}%',
name='Top 10 long positions of all time')
utils.print_table(pd.DataFrame(df_top_short * 100, columns=['max']),
fmt='{0:.2f}%',
name='Top 10 short positions of all time')
utils.print_table(pd.DataFrame(df_top_abs * 100, columns=['max']),
fmt='{0:.2f}%',
name='Top 10 positions of all time')
_, _, df_top_abs_all = pos.get_top_long_short_abs(
positions_alloc, top=9999)
utils.print_table(pd.DataFrame(df_top_abs_all * 100, columns=['max']),
fmt='{0:.2f}%',
name='All positions ever held')
if show_and_plot == 0 or show_and_plot == 2:
if ax is None:
ax = plt.gca()
positions_alloc[df_top_abs.index].plot(
title='Portfolio allocation over time, only top 10 holdings',
alpha=0.4, ax=ax, **kwargs)
# Place legend below plot, shrink plot by 20%
if legend_loc == 'real_best':
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
ax.legend(
loc='upper center', frameon=True, bbox_to_anchor=(
0.5, -0.14), ncol=5)
else:
ax.legend(loc=legend_loc)
ax.set_xlim((returns.index[0], returns.index[-1]))
ax.set_ylabel('Exposure by stock')
if hide_positions:
ax.legend_.remove()
return ax
def plot_max_median_position_concentration(positions, ax=None, **kwargs):
"""
Plots the max and median of long and short position concentrations
over time.
Parameters
----------
positions : pd.DataFrame
The positions that the strategy takes over time.
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
alloc_summary = pos.get_max_median_position_concentration(positions)
colors = ['mediumblue', 'steelblue', 'tomato', 'firebrick']
alloc_summary.plot(linewidth=1, color=colors, alpha=0.6, ax=ax)
ax.legend(loc='center left')
ax.set_ylabel('Exposure')
ax.set_title('Long/Short max and median position concentration')
return ax
def plot_sector_allocations(returns, sector_alloc, ax=None, **kwargs):
"""
Plots the sector exposures of the portfolio over time.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
sector_alloc : pd.DataFrame
Portfolio allocation of positions. See pos.get_sector_alloc.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
sector_alloc.plot(title='Sector allocation over time',
alpha=0.4, ax=ax, **kwargs)
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
ax.legend(
loc='upper center', frameon=True, bbox_to_anchor=(
0.5, -0.14), ncol=5)
ax.set_xlim((sector_alloc.index[0], sector_alloc.index[-1]))
ax.set_ylabel('Exposure by sector')
ax.set_xlabel('')
return ax
def plot_return_quantiles(returns, live_start_date=None, ax=None, **kwargs):
"""
Creates a box plot of daily, weekly, and monthly return
distributions.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
live_start_date : datetime, optional
The point in time when the strategy began live trading, after
its backtest period.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
is_returns = returns if live_start_date is None \
else returns.loc[returns.index < live_start_date]
is_weekly = empyrical.aggregate_returns(is_returns, 'weekly')
is_monthly = empyrical.aggregate_returns(is_returns, 'monthly')
sns.boxplot(data=[is_returns, is_weekly, is_monthly],
palette=["#4c72B0", "#55A868", "#CCB974"],
ax=ax, **kwargs)
if live_start_date is not None:
oos_returns = returns.loc[returns.index >= live_start_date]
oos_weekly = empyrical.aggregate_returns(oos_returns, 'weekly')
oos_monthly = empyrical.aggregate_returns(oos_returns, 'monthly')
sns.swarmplot(data=[oos_returns, oos_weekly, oos_monthly], ax=ax,
color="red",
marker="d", **kwargs)
red_dots = matplotlib.lines.Line2D([], [], color="red", marker="d",
label="Out-of-sample data",
linestyle='')
ax.legend(handles=[red_dots])
ax.set_xticklabels(['Daily', 'Weekly', 'Monthly'])
ax.set_title('Return quantiles')
return ax
def plot_turnover(returns, transactions, positions,
legend_loc='best', ax=None, **kwargs):
"""
Plots turnover vs. date.
Turnover is the number of shares traded for a period as a fraction
of total shares.
Displays daily total, daily average per month, and all-time daily
average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(utils.two_dec_places)
ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))
df_turnover = txn.get_turnover(positions, transactions)
df_turnover_by_month = df_turnover.resample("M").mean()
df_turnover.plot(color='steelblue', alpha=1.0, lw=0.5, ax=ax, **kwargs)
df_turnover_by_month.plot(
color='orangered',
alpha=0.5,
lw=2,
ax=ax,
**kwargs)
ax.axhline(
df_turnover.mean(), color='steelblue', linestyle='--', lw=3, alpha=1.0)
ax.legend(['Daily turnover',
'Average daily turnover, by month',
'Average daily turnover, net'],
loc=legend_loc)
ax.set_title('Daily turnover')
ax.set_xlim((returns.index[0], returns.index[-1]))
ax.set_ylim((0, 1))
ax.set_ylabel('Turnover')
ax.set_xlabel('')
return ax
def plot_slippage_sweep(returns, transactions, positions,
slippage_params=(3, 8, 10, 12, 15, 20, 50),
ax=None, **kwargs):
"""
Plots equity curves at different per-dollar slippage assumptions.
Parameters
----------
returns : pd.Series
Timeseries of portfolio returns to be adjusted for various
degrees of slippage.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
slippage_params: tuple
Slippage parameters to apply to the return time series (in
basis points).
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
turnover = txn.get_turnover(positions, transactions,
period=None, average=False)
slippage_sweep = pd.DataFrame()
for bps in slippage_params:
adj_returns = txn.adjust_returns_for_slippage(returns, turnover, bps)
label = str(bps) + " bps"
slippage_sweep[label] = empyrical.cum_returns(adj_returns, 1)
slippage_sweep.plot(alpha=1.0, lw=0.5, ax=ax)
ax.set_title('Cumulative returns given additional per-dollar slippage')
ax.set_ylabel('')
ax.legend(loc='center left')
return ax
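# A plausible sketch (an assumption, not necessarily what
# txn.adjust_returns_for_slippage does) of the per-dollar slippage adjustment
# swept above: each day's return is reduced by that day's turnover times the
# slippage expressed as a fraction (1 bp = 0.0001).
def _adjust_returns_for_slippage_sketch(returns, turnover, slippage_bps):
    return returns - turnover * (slippage_bps / 10000.)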
def plot_slippage_sensitivity(returns, transactions, positions,
ax=None, **kwargs):
"""
Plots curve relating per-dollar slippage to average annual returns.
Parameters
----------
returns : pd.Series
Timeseries of portfolio returns to be adjusted for various
degrees of slippage.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
turnover = txn.get_turnover(positions, transactions,
period=None, average=False)
avg_returns_given_slippage = pd.Series()
for bps in range(1, 100):
adj_returns = txn.adjust_returns_for_slippage(returns, turnover, bps)
avg_returns = empyrical.annual_return(adj_returns)
avg_returns_given_slippage.loc[bps] = avg_returns
avg_returns_given_slippage.plot(alpha=1.0, lw=2, ax=ax)
ax.set_title('Average annual returns given additional per-dollar slippage')
ax.set_xticks(np.arange(0, 100, 10))
ax.set_ylabel('Average annual return')
ax.set_xlabel('Per-dollar slippage (bps)')
return ax
def plot_capacity_sweep(returns, transactions, market_data,
bt_starting_capital,
min_pv=100000,
max_pv=300000000,
step_size=1000000,
ax=None):
txn_daily_w_bar = capacity.daily_txns_with_bar_data(transactions,
market_data)
captial_base_sweep = pd.Series()
for start_pv in range(min_pv, max_pv, step_size):
adj_ret = capacity.apply_slippage_penalty(returns,
txn_daily_w_bar,
start_pv,
bt_starting_capital)
sharpe = empyrical.sharpe_ratio(adj_ret)
if sharpe < -1:
break
captial_base_sweep.loc[start_pv] = sharpe
captial_base_sweep.index = captial_base_sweep.index / MM_DISPLAY_UNIT
if ax is None:
ax = plt.gca()
captial_base_sweep.plot(ax=ax)
ax.set_xlabel('Capital base ($mm)')
ax.set_ylabel('Sharpe ratio')
ax.set_title('Capital base performance sweep')
return ax
def plot_daily_turnover_hist(transactions, positions,
ax=None, **kwargs):
"""
Plots a histogram of daily turnover rates.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
turnover = txn.get_turnover(positions, transactions, period=None)
sns.distplot(turnover, ax=ax, **kwargs)
ax.set_title('Distribution of daily turnover rates')
ax.set_xlabel('Turnover rate')
return ax
def plot_daily_volume(returns, transactions, ax=None, **kwargs):
"""
Plots trading volume per day vs. date.
Also displays all-time daily average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
daily_txn = txn.get_txn_vol(transactions)
daily_txn.txn_shares.plot(alpha=1.0, lw=0.5, ax=ax, **kwargs)
ax.axhline(daily_txn.txn_shares.mean(), color='steelblue',
linestyle='--', lw=3, alpha=1.0)
ax.set_title('Daily trading volume')
ax.set_xlim((returns.index[0], returns.index[-1]))
ax.set_ylabel('Amount of shares traded')
ax.set_xlabel('')
return ax
def plot_txn_time_hist(transactions, bin_minutes=5, tz='America/New_York',
ax=None, **kwargs):
"""
Plots a histogram of transaction times, binning the times into
buckets of a given duration.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
bin_minutes : float, optional
Sizes of the bins in minutes, defaults to 5 minutes.
tz : str, optional
Time zone to plot against. Note that if the specified
zone does not apply daylight savings, the distribution
may be partially offset.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
txn_time = transactions.copy()
txn_time.index = txn_time.index.tz_convert(pytz.timezone(tz))
txn_time.index = txn_time.index.map(lambda x: x.hour*60 + x.minute)
txn_time['trade_value'] = (txn_time.amount * txn_time.price).abs()
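# 570 and 960 below are minutes since midnight, i.e. 9:30 and 16:00: regular
# US equity trading hours in the default America/New_York time zone.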
txn_time = txn_time.groupby(level=0).sum().reindex(index=range(570, 961))
txn_time.index = (txn_time.index/bin_minutes).astype(int) * bin_minutes
txn_time = txn_time.groupby(level=0).sum()
txn_time['time_str'] = txn_time.index.map(lambda x:
str(datetime.time(int(x/60),
x % 60))[:-3])
trade_value_sum = txn_time.trade_value.sum()
txn_time.trade_value = txn_time.trade_value.fillna(0) / trade_value_sum
ax.bar(txn_time.index, txn_time.trade_value, width=bin_minutes, **kwargs)
ax.set_xlim(570, 960)
ax.set_xticks(txn_time.index[::int(30/bin_minutes)])
ax.set_xticklabels(txn_time.time_str[::int(30/bin_minutes)])
ax.set_title('Transaction time distribution')
ax.set_ylabel('Proportion')
ax.set_xlabel('')
return ax
def plot_daily_returns_similarity(returns_backtest, returns_live,
ax=None, **kwargs):
"""
Plots overlapping distributions of in-sample (backtest) returns
and out-of-sample (live trading) returns.
Parameters
----------
returns_backtest : pd.Series
Daily returns of the strategy's backtest, noncumulative.
returns_live : pd.Series
Daily returns of the strategy's live trading, noncumulative.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
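    Examples
    --------
    A minimal sketch with synthetic, hypothetical return series:
    >>> import numpy as np
    >>> import pandas as pd
    >>> returns_backtest = pd.Series(np.random.randn(500) * 0.01)
    >>> returns_live = pd.Series(np.random.randn(60) * 0.01)
    >>> ax = plot_daily_returns_similarity(returns_backtest, returns_live)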
"""
if ax is None:
ax = plt.gca()
sns.kdeplot(utils.standardize_data(returns_backtest),
bw='scott', shade=True, label='backtest',
color='forestgreen', ax=ax, **kwargs)
sns.kdeplot(utils.standardize_data(returns_live),
bw='scott', shade=True, label='out-of-sample',
color='red', ax=ax, **kwargs)
return ax
def show_worst_drawdown_periods(returns, top=5):
"""
Prints information about the worst drawdown periods.
Prints peak dates, valley dates, recovery dates, and net
drawdowns.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
top : int, optional
        Number of worst drawdown periods to display (default 5).
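    Examples
    --------
    A minimal sketch with synthetic, hypothetical daily returns:
    >>> import numpy as np
    >>> import pandas as pd
    >>> idx = pd.date_range('2016-01-04', periods=250, freq='B')
    >>> returns = pd.Series(np.random.randn(250) * 0.01, index=idx)
    >>> show_worst_drawdown_periods(returns, top=3)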
"""
drawdown_df = timeseries.gen_drawdown_table(returns, top=top)
utils.print_table(drawdown_df.sort_values('Net drawdown in %',
ascending=False),
name='Worst drawdown periods', fmt='{0:.2f}')
def plot_monthly_returns_timeseries(returns, ax=None, **kwargs):
"""
Plots monthly returns as a timeseries.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
def cumulate_returns(x):
return empyrical.cum_returns(x)[-1]
if ax is None:
ax = plt.gca()
monthly_rets = returns.resample('M').apply(lambda x: cumulate_returns(x))
monthly_rets = monthly_rets.to_period()
    sns.barplot(x=monthly_rets.index,
                y=monthly_rets.values,
                color='steelblue',
                ax=ax,
                **kwargs)
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
# only show x-labels on year boundary
xticks_coord = []
xticks_label = []
count = 0
for i in monthly_rets.index:
if i.month == 1:
xticks_label.append(i)
xticks_coord.append(count)
# plot yearly boundary line
ax.axvline(count, color='gray', ls='--', alpha=0.3)
count += 1
ax.axhline(0.0, color='darkgray', ls='-')
ax.set_xticks(xticks_coord)
ax.set_xticklabels(xticks_label)
return ax
def plot_round_trip_lifetimes(round_trips, disp_amount=16, lsize=18, ax=None):
"""
Plots timespans and directions of a sample of round trip trades.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.subplot()
symbols_sample = round_trips.symbol.unique()
np.random.seed(1)
    sample = np.random.choice(symbols_sample, replace=False,
                              size=min(disp_amount, len(symbols_sample)))
sample_round_trips = round_trips[round_trips.symbol.isin(sample)]
symbol_idx = pd.Series(np.arange(len(sample)), index=sample)
for symbol, sym_round_trips in sample_round_trips.groupby('symbol'):
for _, row in sym_round_trips.iterrows():
c = 'b' if row.long else 'r'
y_ix = symbol_idx[symbol] + 0.05
ax.plot([row['open_dt'], row['close_dt']],
[y_ix, y_ix], color=c,
linewidth=lsize, solid_capstyle='butt')
    ax.set_yticks(range(len(sample)))
ax.set_yticklabels([utils.format_asset(s) for s in sample])
ax.set_ylim((-0.5, min(len(sample), disp_amount) - 0.5))
blue = patches.Rectangle([0, 0], 1, 1, color='b', label='Long')
red = patches.Rectangle([0, 0], 1, 1, color='r', label='Short')
leg = ax.legend(handles=[blue, red], frameon=True, loc='lower left')
leg.get_frame().set_edgecolor('black')
ax.grid(False)
return ax
def show_profit_attribution(round_trips):
"""
Prints the share of total PnL contributed by each
traded name.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
"""
total_pnl = round_trips['pnl'].sum()
pnl_attribution = round_trips.groupby('symbol')['pnl'].sum() / total_pnl
pnl_attribution.name = ''
pnl_attribution.index = pnl_attribution.index.map(utils.format_asset)
utils.print_table(pnl_attribution.sort_values(
inplace=False,
ascending=False),
name='Profitability (PnL / PnL total) per name',
fmt='{:.2%}')
def plot_prob_profit_trade(round_trips, ax=None):
"""
    Plots the probability distribution for the event of making
    a profitable trade, modeled as a Beta distribution parameterized
    by the counts of profitable and unprofitable round trips.
Parameters
----------
round_trips : pd.DataFrame
DataFrame with one row per round trip trade.
- See full explanation in round_trips.extract_round_trips
ax : matplotlib.Axes, optional
Axes upon which to plot.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
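    Examples
    --------
    A minimal sketch; ``round_trips`` here is a hypothetical frame with a
    ``pnl`` column, as produced by round_trips.extract_round_trips:
    >>> import pandas as pd
    >>> round_trips = pd.DataFrame({'pnl': [120.0, -35.5, 80.2, -10.0]})
    >>> ax = plot_prob_profit_trade(round_trips)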
"""
x = np.linspace(0, 1., 500)
round_trips['profitable'] = round_trips.pnl > 0
dist = sp.stats.beta(round_trips.profitable.sum(),
(~round_trips.profitable).sum())
y = dist.pdf(x)
lower_perc = dist.ppf(.025)
upper_perc = dist.ppf(.975)
lower_plot = dist.ppf(.001)
upper_plot = dist.ppf(.999)
if ax is None:
ax = plt.subplot()
ax.plot(x, y)
ax.axvline(lower_perc, color='0.5')
ax.axvline(upper_perc, color='0.5')
ax.set_xlabel('Probability of making a profitable decision')
ax.set_ylabel('Belief')
ax.set_xlim(lower_plot, upper_plot)
ax.set_ylim((0, y.max() + 1.))
return ax
def plot_cones(name, bounds, oos_returns, num_samples=1000, ax=None,
cone_std=(1., 1.5, 2.), random_seed=None, num_strikes=3):
"""
Plots the upper and lower bounds of an n standard deviation
cone of forecasted cumulative returns. Redraws a new cone when
cumulative returns fall outside of last cone drawn.
Parameters
----------
name : str
Account name to be used as figure title.
bounds : pandas.core.frame.DataFrame
Contains upper and lower cone boundaries. Column names are
        strings corresponding to the number of standard deviations
above (positive) or below (negative) the projected mean
cumulative returns.
oos_returns : pandas.core.frame.DataFrame
Non-cumulative out-of-sample returns.
num_samples : int
Number of samples to draw from the in-sample daily returns.
Each sample will be an array with length num_days.
A higher number of samples will generate a more accurate
bootstrap cone.
ax : matplotlib.Axes, optional
Axes upon which to plot.
cone_std : list of int/float
        Number of standard deviations to use in the boundaries of
the cone. If multiple values are passed, cone bounds will
be generated for each value.
random_seed : int
Seed for the pseudorandom number generator used by the pandas
sample method.
num_strikes : int
Upper limit for number of cones drawn. Can be anything from 0 to 3.
Returns
-------
    Returns either an ax or a fig, but not both. If a matplotlib.Axes
    instance is passed in as ax, it is modified and returned; this allows
    users to plot interactively in a Jupyter notebook. When no ax object
    is passed in, a matplotlib.figure instance is generated and returned,
    which can then be used to save the plot as an image without viewing it.
ax : matplotlib.Axes
The axes that were plotted on.
fig : matplotlib.figure
The figure instance which contains all the plot elements.
"""
if ax is None:
fig = figure.Figure(figsize=(10, 8))
FigureCanvasAgg(fig)
axes = fig.add_subplot(111)
else:
axes = ax
returns = empyrical.cum_returns(oos_returns, starting_value=1.)
bounds_tmp = bounds.copy()
returns_tmp = returns.copy()
cone_start = returns.index[0]
colors = ["green", "orange", "orangered", "darkred"]
for c in range(num_strikes + 1):
if c > 0:
tmp = returns.loc[cone_start:]
bounds_tmp = bounds_tmp.iloc[0:len(tmp)]
bounds_tmp = bounds_tmp.set_index(tmp.index)
crossing = (tmp < bounds_tmp[float(-2.)].iloc[:len(tmp)])
if crossing.sum() <= 0:
break
cone_start = crossing.loc[crossing].index[0]
returns_tmp = returns.loc[cone_start:]
bounds_tmp = (bounds - (1 - returns.loc[cone_start]))
for std in cone_std:
x = returns_tmp.index
y1 = bounds_tmp[float(std)].iloc[:len(returns_tmp)]
y2 = bounds_tmp[float(-std)].iloc[:len(returns_tmp)]
axes.fill_between(x, y1, y2, color=colors[c], alpha=0.5)
# Plot returns line graph
label = 'Cumulative returns = {:.2f}%'.format((returns.iloc[-1] - 1) * 100)
axes.plot(returns.index, returns.values, color='black', lw=3.,
label=label)
if name is not None:
axes.set_title(name)
axes.axhline(1, color='black', alpha=0.2)
axes.legend()
if ax is None:
return fig
else:
return axes
def plot_multistrike_cones(is_returns, oos_returns, num_samples=1000,
name=None, ax=None, cone_std=(1., 1.5, 2.),
random_seed=None, num_strikes=0):
"""
Plots the upper and lower bounds of an n standard deviation
cone of forecasted cumulative returns. This cone is non-parametric,
meaning it does not assume that returns are normally distributed. Redraws
a new cone when returns fall outside of last cone drawn.
Parameters
----------
is_returns : pandas.core.frame.DataFrame
Non-cumulative in-sample returns.
oos_returns : pandas.core.frame.DataFrame
Non-cumulative out-of-sample returns.
num_samples : int
Number of samples to draw from the in-sample daily returns.
Each sample will be an array with length num_days.
A higher number of samples will generate a more accurate
bootstrap cone.
name : str, optional
Plot title
ax : matplotlib.Axes, optional
Axes upon which to plot.
cone_std : list of int/float
        Number of standard deviations to use in the boundaries of
the cone. If multiple values are passed, cone bounds will
be generated for each value.
random_seed : int
Seed for the pseudorandom number generator used by the pandas
sample method.
num_strikes : int
Upper limit for number of cones drawn. Can be anything from 0 to 3.
Returns
-------
    Returns either an ax or a fig, but not both. If a matplotlib.Axes
    instance is passed in as ax, it is modified and returned; this allows
    users to plot interactively in a Jupyter notebook. When no ax object
    is passed in, a matplotlib.figure instance is generated and returned,
    which can then be used to save the plot as an image without viewing it.
ax : matplotlib.Axes
The axes that were plotted on.
fig : matplotlib.figure
The figure instance which contains all the plot elements.
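    Examples
    --------
    A minimal sketch with synthetic, hypothetical returns; with no ``ax``
    passed, a matplotlib.figure instance is returned:
    >>> import numpy as np
    >>> import pandas as pd
    >>> idx = pd.date_range('2015-01-01', periods=500, freq='B')
    >>> rets = pd.Series(np.random.randn(500) * 0.01, index=idx)
    >>> is_returns, oos_returns = rets[:400], rets[400:]
    >>> fig = plot_multistrike_cones(is_returns, oos_returns,
    ...                              num_samples=100, name='demo')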
"""
bounds = timeseries.forecast_cone_bootstrap(
is_returns=is_returns,
num_days=len(oos_returns),
cone_std=cone_std,
num_samples=num_samples,
random_seed=random_seed
)
return plot_cones(
name=name,
bounds=bounds.set_index(oos_returns.index),
oos_returns=oos_returns,
num_samples=num_samples,
ax=ax,
cone_std=cone_std,
random_seed=random_seed,
num_strikes=num_strikes
)
| apache-2.0 |
pratyakshs/pgmpy | pgmpy/inference/Sampling.py | 2 | 15872 | from collections import namedtuple
import itertools
import networkx as nx
import numpy as np
from pandas import DataFrame
from pgmpy.factors.Factor import Factor, factor_product
from pgmpy.inference import Inference
from pgmpy.models import BayesianModel, MarkovChain, MarkovModel
from pgmpy.utils.mathext import sample_discrete
State = namedtuple('State', ['var', 'state'])
class BayesianModelSampling(Inference):
"""
Class for sampling methods specific to Bayesian Models
Parameters
----------
model: instance of BayesianModel
model on which inference queries will be computed
Public Methods
--------------
    forward_sample(size)
    rejection_sample(evidence, size)
    likelihood_weighted_sample(evidence, size)
"""
def __init__(self, model):
if not isinstance(model, BayesianModel):
raise TypeError("model must an instance of BayesianModel")
super().__init__(model)
self.topological_order = nx.topological_sort(model)
self.cpds = {node: model.get_cpds(node) for node in model.nodes()}
def forward_sample(self, size=1):
"""
Generates sample(s) from joint distribution of the bayesian network.
Parameters
----------
size: int
size of sample to be generated
Returns
-------
sampled: pandas.DataFrame
the generated samples
Examples
--------
>>> from pgmpy.models.BayesianModel import BayesianModel
>>> from pgmpy.factors.CPD import TabularCPD
>>> from pgmpy.inference.Sampling import BayesianModelSampling
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]])
>>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25,
... 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd_d, cpd_i, cpd_g)
>>> inference = BayesianModelSampling(student)
>>> inference.forward_sample(2)
diff intel grade
0 (diff, 1) (intel, 0) (grade, 1)
1 (diff, 1) (intel, 0) (grade, 2)
"""
sampled = DataFrame(index=range(size), columns=self.topological_order)
for node in self.topological_order:
cpd = self.cpds[node]
states = [state for state in range(cpd.get_cardinality(node)[node])]
if cpd.evidence:
indices = [i for i, x in enumerate(self.topological_order) if x in cpd.evidence]
evidence = sampled.values[:, [indices]].tolist()
weights = list(map(lambda t: cpd.reduce(t[0], inplace=False).values, evidence))
sampled[node] = list(map(lambda t: State(node, t), sample_discrete(states, weights)))
else:
sampled[node] = list(map(lambda t: State(node, t),
sample_discrete(states, cpd.values, size)))
return sampled
def rejection_sample(self, evidence=None, size=1):
"""
Generates sample(s) from joint distribution of the bayesian network,
given the evidence.
Parameters
----------
evidence: list of `pgmpy.factor.State` namedtuples
None if no evidence
size: int
size of sample to be generated
Returns
-------
sampled: pandas.DataFrame
the generated samples
Examples
--------
>>> from pgmpy.models.BayesianModel import BayesianModel
>>> from pgmpy.factors.CPD import TabularCPD
>>> from pgmpy.factors.Factor import State
>>> from pgmpy.inference.Sampling import BayesianModelSampling
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]])
>>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25,
... 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd_d, cpd_i, cpd_g)
>>> inference = BayesianModelSampling(student)
>>> evidence = [State(var='diff', state=0)]
>>> inference.rejection_sample(evidence, 2)
intel diff grade
0 (intel, 0) (diff, 0) (grade, 1)
1 (intel, 0) (diff, 0) (grade, 1)
"""
if evidence is None:
return self.forward_sample(size)
sampled = DataFrame(columns=self.topological_order)
prob = 1
while len(sampled) < size:
_size = int(((size - len(sampled)) / prob) * 1.5)
_sampled = self.forward_sample(_size)
for evid in evidence:
_sampled = _sampled[_sampled.ix[:, evid.var] == evid]
prob = max(len(_sampled) / _size, 0.01)
sampled = sampled.append(_sampled)
sampled.reset_index(inplace=True, drop=True)
return sampled[:size]
def likelihood_weighted_sample(self, evidence=None, size=1):
"""
Generates weighted sample(s) from joint distribution of the bayesian
network, that comply with the given evidence.
        'Probabilistic Graphical Models: Principles and Techniques', Koller and
Friedman, Algorithm 12.2 pp 493.
Parameters
----------
evidence: list of `pgmpy.factor.State` namedtuples
None if no evidence
size: int
size of sample to be generated
Returns
-------
sampled: pandas.DataFrame
the generated samples with corresponding weights
Examples
--------
>>> from pgmpy.factors.Factor import State
>>> from pgmpy.models.BayesianModel import BayesianModel
>>> from pgmpy.factors.CPD import TabularCPD
>>> from pgmpy.inference.Sampling import BayesianModelSampling
>>> student = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> cpd_d = TabularCPD('diff', 2, [[0.6], [0.4]])
>>> cpd_i = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> cpd_g = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5], [0.4, 0.25,
... 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
... ['intel', 'diff'], [2, 2])
>>> student.add_cpds(cpd_d, cpd_i, cpd_g)
>>> inference = BayesianModelSampling(student)
>>> evidence = [State('diff', 0)]
>>> inference.likelihood_weighted_sample(evidence, 2)
intel diff grade _weight
0 (intel, 0) (diff, 0) (grade, 1) 0.6
1 (intel, 1) (diff, 0) (grade, 1) 0.6
"""
sampled = DataFrame(index=range(size), columns=self.topological_order)
sampled['_weight'] = np.ones(size)
        evidence = evidence if evidence is not None else []
        evidence_dict = {var: st for var, st in evidence}
for node in self.topological_order:
cpd = self.cpds[node]
states = [state for state in range(cpd.get_cardinality(node)[node])]
if cpd.evidence:
indices = [i for i, x in enumerate(self.topological_order) if x in cpd.evidence]
evidence = sampled.values[:, [indices]].tolist()
weights = list(map(lambda t: cpd.reduce(t[0], inplace=False).values, evidence))
if node in evidence_dict:
sampled[node] = (State(node, evidence_dict[node]), ) * size
for i in range(size):
sampled.loc[i, '_weight'] *= weights[i][evidence_dict[node]]
else:
sampled[node] = list(map(lambda t: State(node, t), sample_discrete(states, weights)))
else:
if node in evidence_dict:
sampled[node] = (State(node, evidence_dict[node]), ) * size
for i in range(size):
sampled.loc[i, '_weight'] *= cpd.values[evidence_dict[node]]
else:
sampled[node] = list(map(lambda t: State(node, t),
sample_discrete(states, cpd.values, size)))
return sampled
class GibbsSampling(MarkovChain):
"""
Class for performing Gibbs sampling.
Parameters:
-----------
model: BayesianModel or MarkovModel
        Model from which variables are inherited and transition probabilities are computed.
Public Methods:
---------------
set_start_state(state)
sample(start_state, size)
generate_sample(start_state, size)
Examples:
---------
Initialization from a BayesianModel object:
>>> from pgmpy.factors import TabularCPD
>>> from pgmpy.models import BayesianModel
>>> intel_cpd = TabularCPD('intel', 2, [[0.7], [0.3]])
>>> sat_cpd = TabularCPD('sat', 2, [[0.95, 0.2], [0.05, 0.8]], evidence=['intel'], evidence_card=[2])
>>> student = BayesianModel()
>>> student.add_nodes_from(['intel', 'sat'])
>>> student.add_edge('intel', 'sat')
>>> student.add_cpds(intel_cpd, sat_cpd)
>>> from pgmpy.inference import GibbsSampling
>>> gibbs_chain = GibbsSampling(student)
Sample from it:
>>> gibbs_chain.sample(size=3)
intel sat
0 0 0
1 0 0
2 1 1
"""
def __init__(self, model=None):
super().__init__()
if isinstance(model, BayesianModel):
self._get_kernel_from_bayesian_model(model)
elif isinstance(model, MarkovModel):
self._get_kernel_from_markov_model(model)
def _get_kernel_from_bayesian_model(self, model):
"""
Computes the Gibbs transition models from a Bayesian Network.
        'Probabilistic Graphical Models: Principles and Techniques', Koller and
Friedman, Section 12.3.3 pp 512-513.
Parameters:
-----------
model: BayesianModel
The model from which probabilities will be computed.
"""
self.variables = np.array(model.nodes())
self.cardinalities = {var: model.get_cpds(var).variable_card for var in self.variables}
for var in self.variables:
other_vars = [v for v in self.variables if var != v]
other_cards = [self.cardinalities[v] for v in other_vars]
cpds = [cpd for cpd in model.cpds if var in cpd.scope()]
prod_cpd = factor_product(*cpds)
kernel = {}
scope = set(prod_cpd.scope())
for tup in itertools.product(*[range(card) for card in other_cards]):
states = [State(var, s) for var, s in zip(other_vars, tup) if var in scope]
prod_cpd_reduced = prod_cpd.reduce(states, inplace=False)
kernel[tup] = prod_cpd_reduced.values / sum(prod_cpd_reduced.values)
self.transition_models[var] = kernel
def _get_kernel_from_markov_model(self, model):
"""
Computes the Gibbs transition models from a Markov Network.
        'Probabilistic Graphical Models: Principles and Techniques', Koller and
Friedman, Section 12.3.3 pp 512-513.
Parameters:
-----------
model: MarkovModel
The model from which probabilities will be computed.
"""
self.variables = np.array(model.nodes())
factors_dict = {var: [] for var in self.variables}
for factor in model.get_factors():
for var in factor.scope():
factors_dict[var].append(factor)
# Take factor product
factors_dict = {var: factor_product(*factors) if len(factors) > 1 else factors[0]
for var, factors in factors_dict.items()}
self.cardinalities = {var: factors_dict[var].get_cardinality(var)[var] for var in self.variables}
for var in self.variables:
other_vars = [v for v in self.variables if var != v]
other_cards = [self.cardinalities[v] for v in other_vars]
kernel = {}
factor = factors_dict[var]
scope = set(factor.scope())
for tup in itertools.product(*[range(card) for card in other_cards]):
states = [State(var, s) for var, s in zip(other_vars, tup) if var in scope]
reduced_factor = factor.reduce(states, inplace=False)
kernel[tup] = reduced_factor.values / sum(reduced_factor.values)
self.transition_models[var] = kernel
def sample(self, start_state=None, size=1):
"""
Sample from the Markov Chain.
Parameters:
-----------
start_state: dict or array-like iterable
Representing the starting states of the variables. If None is passed, a random start_state is chosen.
size: int
Number of samples to be generated.
Return Type:
------------
pandas.DataFrame
Examples:
---------
>>> from pgmpy.factors import Factor
>>> from pgmpy.inference import GibbsSampling
>>> from pgmpy.models import MarkovModel
>>> model = MarkovModel([('A', 'B'), ('C', 'B')])
>>> factor_ab = Factor(['A', 'B'], [2, 2], [1, 2, 3, 4])
>>> factor_cb = Factor(['C', 'B'], [2, 2], [5, 6, 7, 8])
>>> model.add_factors(factor_ab, factor_cb)
>>> gibbs = GibbsSampling(model)
>>> gibbs.sample(size=4)
A B C
0 0 1 1
1 1 0 0
2 1 1 0
3 1 1 1
"""
if start_state is None and self.state is None:
self.state = self.random_state()
else:
self.set_start_state(start_state)
sampled = DataFrame(index=range(size), columns=self.variables)
sampled.loc[0] = [st for var, st in self.state]
for i in range(size - 1):
for j, (var, st) in enumerate(self.state):
other_st = tuple(st for v, st in self.state if var != v)
next_st = sample_discrete(list(range(self.cardinalities[var])),
self.transition_models[var][other_st])[0]
self.state[j] = State(var, next_st)
sampled.loc[i + 1] = [st for var, st in self.state]
return sampled
def generate_sample(self, start_state=None, size=1):
"""
Generator version of self.sample
Return Type:
------------
List of State namedtuples, representing the assignment to all variables of the model.
Examples:
---------
>>> from pgmpy.factors import Factor
>>> from pgmpy.inference import GibbsSampling
>>> from pgmpy.models import MarkovModel
>>> model = MarkovModel([('A', 'B'), ('C', 'B')])
>>> factor_ab = Factor(['A', 'B'], [2, 2], [1, 2, 3, 4])
>>> factor_cb = Factor(['C', 'B'], [2, 2], [5, 6, 7, 8])
>>> model.add_factors(factor_ab, factor_cb)
>>> gibbs = GibbsSampling(model)
>>> gen = gibbs.generate_sample(size=2)
>>> [sample for sample in gen]
[[State(var='C', state=1), State(var='B', state=1), State(var='A', state=0)],
[State(var='C', state=0), State(var='B', state=1), State(var='A', state=1)]]
"""
if start_state is None and self.state is None:
self.state = self.random_state()
else:
self.set_start_state(start_state)
for i in range(size):
for j, (var, st) in enumerate(self.state):
other_st = tuple(st for v, st in self.state if var != v)
next_st = sample_discrete(list(range(self.cardinalities[var])),
self.transition_models[var][other_st])[0]
self.state[j] = State(var, next_st)
yield self.state[:]
| mit |
Srisai85/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 272 | 6972 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of the calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/sparse/test_series.py | 7 | 51517 | # pylint: disable-msg=E1101,W0612
import operator
import pytest
from numpy import nan
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, bdate_range
from pandas.core.common import isnull
from pandas.tseries.offsets import BDay
import pandas.util.testing as tm
from pandas.compat import range
from pandas import compat
from pandas.core.reshape.util import cartesian_product
import pandas.core.sparse.frame as spf
from pandas._libs.sparse import BlockIndex, IntIndex
from pandas.core.sparse.api import SparseSeries
from pandas.tests.series.test_api import SharedWithSparse
def _test_data1():
# nan-based
arr = np.arange(20, dtype=float)
index = np.arange(20)
arr[:2] = nan
arr[5:10] = nan
arr[-3:] = nan
return arr, index
def _test_data2():
# nan-based
arr = np.arange(15, dtype=float)
index = np.arange(15)
arr[7:12] = nan
arr[-1:] = nan
return arr, index
def _test_data1_zero():
# zero-based
arr, index = _test_data1()
arr[np.isnan(arr)] = 0
return arr, index
def _test_data2_zero():
# zero-based
arr, index = _test_data2()
arr[np.isnan(arr)] = 0
return arr, index
class TestSparseSeries(SharedWithSparse):
def setup_method(self, method):
arr, index = _test_data1()
date_index = bdate_range('1/1/2011', periods=len(index))
self.bseries = SparseSeries(arr, index=index, kind='block',
name='bseries')
self.ts = self.bseries
self.btseries = SparseSeries(arr, index=date_index, kind='block')
self.iseries = SparseSeries(arr, index=index, kind='integer',
name='iseries')
arr, index = _test_data2()
self.bseries2 = SparseSeries(arr, index=index, kind='block')
self.iseries2 = SparseSeries(arr, index=index, kind='integer')
arr, index = _test_data1_zero()
self.zbseries = SparseSeries(arr, index=index, kind='block',
fill_value=0, name='zbseries')
self.ziseries = SparseSeries(arr, index=index, kind='integer',
fill_value=0)
arr, index = _test_data2_zero()
self.zbseries2 = SparseSeries(arr, index=index, kind='block',
fill_value=0)
self.ziseries2 = SparseSeries(arr, index=index, kind='integer',
fill_value=0)
def test_constructor_dtype(self):
arr = SparseSeries([np.nan, 1, 2, np.nan])
assert arr.dtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseSeries([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == np.float64
assert arr.fill_value == 0
arr = SparseSeries([0, 1, 2, 4], dtype=np.int64, fill_value=np.nan)
assert arr.dtype == np.int64
assert np.isnan(arr.fill_value)
arr = SparseSeries([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == np.int64
assert arr.fill_value == 0
arr = SparseSeries([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == np.int64
assert arr.fill_value == 0
def test_iteration_and_str(self):
[x for x in self.bseries]
str(self.bseries)
def test_construct_DataFrame_with_sp_series(self):
# it works!
df = DataFrame({'col': self.bseries})
# printing & access
df.iloc[:1]
df['col']
df.dtypes
str(df)
tm.assert_sp_series_equal(df['col'], self.bseries, check_names=False)
result = df.iloc[:, 0]
tm.assert_sp_series_equal(result, self.bseries, check_names=False)
# blocking
expected = Series({'col': 'float64:sparse'})
result = df.ftypes
tm.assert_series_equal(expected, result)
def test_constructor_preserve_attr(self):
arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)
assert arr.dtype == np.int64
assert arr.fill_value == 0
s = pd.SparseSeries(arr, name='x')
assert s.dtype == np.int64
assert s.fill_value == 0
def test_series_density(self):
# GH2803
ts = Series(np.random.randn(10))
ts[2:-2] = nan
sts = ts.to_sparse()
density = sts.density # don't die
assert density == 4 / 10.0
def test_sparse_to_dense(self):
arr, index = _test_data1()
series = self.bseries.to_dense()
tm.assert_series_equal(series, Series(arr, name='bseries'))
# see gh-14647
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
series = self.bseries.to_dense(sparse_only=True)
indexer = np.isfinite(arr)
exp = Series(arr[indexer], index=index[indexer], name='bseries')
tm.assert_series_equal(series, exp)
series = self.iseries.to_dense()
tm.assert_series_equal(series, Series(arr, name='iseries'))
arr, index = _test_data1_zero()
series = self.zbseries.to_dense()
tm.assert_series_equal(series, Series(arr, name='zbseries'))
series = self.ziseries.to_dense()
tm.assert_series_equal(series, Series(arr))
def test_to_dense_fill_value(self):
s = pd.Series([1, np.nan, np.nan, 3, np.nan])
res = SparseSeries(s).to_dense()
tm.assert_series_equal(res, s)
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
s = pd.Series([1, np.nan, 0, 3, 0])
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
s = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan])
res = SparseSeries(s).to_dense()
tm.assert_series_equal(res, s)
s = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan])
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
def test_dense_to_sparse(self):
series = self.bseries.to_dense()
bseries = series.to_sparse(kind='block')
iseries = series.to_sparse(kind='integer')
tm.assert_sp_series_equal(bseries, self.bseries)
tm.assert_sp_series_equal(iseries, self.iseries, check_names=False)
assert iseries.name == self.bseries.name
assert len(series) == len(bseries)
assert len(series) == len(iseries)
assert series.shape == bseries.shape
assert series.shape == iseries.shape
# non-NaN fill value
series = self.zbseries.to_dense()
zbseries = series.to_sparse(kind='block', fill_value=0)
ziseries = series.to_sparse(kind='integer', fill_value=0)
tm.assert_sp_series_equal(zbseries, self.zbseries)
tm.assert_sp_series_equal(ziseries, self.ziseries, check_names=False)
assert ziseries.name == self.zbseries.name
assert len(series) == len(zbseries)
assert len(series) == len(ziseries)
assert series.shape == zbseries.shape
assert series.shape == ziseries.shape
def test_to_dense_preserve_name(self):
assert (self.bseries.name is not None)
result = self.bseries.to_dense()
assert result.name == self.bseries.name
def test_constructor(self):
# test setup guys
assert np.isnan(self.bseries.fill_value)
assert isinstance(self.bseries.sp_index, BlockIndex)
assert np.isnan(self.iseries.fill_value)
assert isinstance(self.iseries.sp_index, IntIndex)
assert self.zbseries.fill_value == 0
tm.assert_numpy_array_equal(self.zbseries.values.values,
self.bseries.to_dense().fillna(0).values)
# pass SparseSeries
def _check_const(sparse, name):
# use passed series name
result = SparseSeries(sparse)
tm.assert_sp_series_equal(result, sparse)
assert sparse.name == name
assert result.name == name
# use passed name
result = SparseSeries(sparse, name='x')
tm.assert_sp_series_equal(result, sparse, check_names=False)
assert result.name == 'x'
_check_const(self.bseries, 'bseries')
_check_const(self.iseries, 'iseries')
_check_const(self.zbseries, 'zbseries')
# Sparse time series works
date_index = bdate_range('1/1/2000', periods=len(self.bseries))
s5 = SparseSeries(self.bseries, index=date_index)
assert isinstance(s5, SparseSeries)
# pass Series
bseries2 = SparseSeries(self.bseries.to_dense())
tm.assert_numpy_array_equal(self.bseries.sp_values, bseries2.sp_values)
# pass dict?
# don't copy the data by default
values = np.ones(self.bseries.npoints)
sp = SparseSeries(values, sparse_index=self.bseries.sp_index)
sp.sp_values[:5] = 97
assert values[0] == 97
assert len(sp) == 20
assert sp.shape == (20, )
# but can make it copy!
sp = SparseSeries(values, sparse_index=self.bseries.sp_index,
copy=True)
sp.sp_values[:5] = 100
assert values[0] == 97
assert len(sp) == 20
assert sp.shape == (20, )
def test_constructor_scalar(self):
data = 5
sp = SparseSeries(data, np.arange(100))
sp = sp.reindex(np.arange(200))
assert (sp.loc[:99] == data).all()
assert isnull(sp.loc[100:]).all()
data = np.nan
sp = SparseSeries(data, np.arange(100))
assert len(sp) == 100
assert sp.shape == (100, )
def test_constructor_ndarray(self):
pass
def test_constructor_nonnan(self):
arr = [0, 0, 0, nan, nan]
sp_series = SparseSeries(arr, fill_value=0)
tm.assert_numpy_array_equal(sp_series.values.values, np.array(arr))
assert len(sp_series) == 5
assert sp_series.shape == (5, )
def test_constructor_empty(self):
# see gh-9272
sp = SparseSeries()
assert len(sp.index) == 0
assert sp.shape == (0, )
def test_copy_astype(self):
cop = self.bseries.astype(np.float64)
assert cop is not self.bseries
assert cop.sp_index is self.bseries.sp_index
assert cop.dtype == np.float64
cop2 = self.iseries.copy()
tm.assert_sp_series_equal(cop, self.bseries)
tm.assert_sp_series_equal(cop2, self.iseries)
# test that data is copied
cop[:5] = 97
assert cop.sp_values[0] == 97
assert self.bseries.sp_values[0] != 97
# correct fill value
zbcop = self.zbseries.copy()
zicop = self.ziseries.copy()
tm.assert_sp_series_equal(zbcop, self.zbseries)
tm.assert_sp_series_equal(zicop, self.ziseries)
# no deep copy
view = self.bseries.copy(deep=False)
view.sp_values[:5] = 5
assert (self.bseries.sp_values[:5] == 5).all()
def test_shape(self):
# see gh-10452
assert self.bseries.shape == (20, )
assert self.btseries.shape == (20, )
assert self.iseries.shape == (20, )
assert self.bseries2.shape == (15, )
assert self.iseries2.shape == (15, )
assert self.zbseries2.shape == (15, )
assert self.ziseries2.shape == (15, )
def test_astype(self):
with pytest.raises(ValueError):
self.bseries.astype(np.int64)
def test_astype_all(self):
orig = pd.Series(np.array([1, 2, 3]))
s = SparseSeries(orig)
types = [np.float64, np.float32, np.int64,
np.int32, np.int16, np.int8]
for typ in types:
res = s.astype(typ)
assert res.dtype == typ
tm.assert_series_equal(res.to_dense(), orig.astype(typ))
def test_kind(self):
assert self.bseries.kind == 'block'
assert self.iseries.kind == 'integer'
def test_to_frame(self):
# GH 9850
s = pd.SparseSeries([1, 2, 0, nan, 4, nan, 0], name='x')
exp = pd.SparseDataFrame({'x': [1, 2, 0, nan, 4, nan, 0]})
tm.assert_sp_frame_equal(s.to_frame(), exp)
exp = pd.SparseDataFrame({'y': [1, 2, 0, nan, 4, nan, 0]})
tm.assert_sp_frame_equal(s.to_frame(name='y'), exp)
s = pd.SparseSeries([1, 2, 0, nan, 4, nan, 0], name='x', fill_value=0)
exp = pd.SparseDataFrame({'x': [1, 2, 0, nan, 4, nan, 0]},
default_fill_value=0)
tm.assert_sp_frame_equal(s.to_frame(), exp)
exp = pd.DataFrame({'y': [1, 2, 0, nan, 4, nan, 0]})
tm.assert_frame_equal(s.to_frame(name='y').to_dense(), exp)
def test_pickle(self):
def _test_roundtrip(series):
unpickled = tm.round_trip_pickle(series)
tm.assert_sp_series_equal(series, unpickled)
tm.assert_series_equal(series.to_dense(), unpickled.to_dense())
self._check_all(_test_roundtrip)
def _check_all(self, check_func):
check_func(self.bseries)
check_func(self.iseries)
check_func(self.zbseries)
check_func(self.ziseries)
def test_getitem(self):
def _check_getitem(sp, dense):
for idx, val in compat.iteritems(dense):
tm.assert_almost_equal(val, sp[idx])
for i in range(len(dense)):
tm.assert_almost_equal(sp[i], dense[i])
# j = np.float64(i)
# assert_almost_equal(sp[j], dense[j])
# API change 1/6/2012
# negative getitem works
# for i in xrange(len(dense)):
# assert_almost_equal(sp[-i], dense[-i])
_check_getitem(self.bseries, self.bseries.to_dense())
_check_getitem(self.btseries, self.btseries.to_dense())
_check_getitem(self.zbseries, self.zbseries.to_dense())
_check_getitem(self.iseries, self.iseries.to_dense())
_check_getitem(self.ziseries, self.ziseries.to_dense())
# exception handling
pytest.raises(Exception, self.bseries.__getitem__,
len(self.bseries) + 1)
# index not contained
pytest.raises(Exception, self.btseries.__getitem__,
self.btseries.index[-1] + BDay())
def test_get_get_value(self):
tm.assert_almost_equal(self.bseries.get(10), self.bseries[10])
assert self.bseries.get(len(self.bseries) + 1) is None
dt = self.btseries.index[10]
result = self.btseries.get(dt)
expected = self.btseries.to_dense()[dt]
tm.assert_almost_equal(result, expected)
tm.assert_almost_equal(self.bseries.get_value(10), self.bseries[10])
def test_set_value(self):
idx = self.btseries.index[7]
self.btseries.set_value(idx, 0)
assert self.btseries[idx] == 0
self.iseries.set_value('foobar', 0)
assert self.iseries.index[-1] == 'foobar'
assert self.iseries['foobar'] == 0
def test_getitem_slice(self):
idx = self.bseries.index
res = self.bseries[::2]
assert isinstance(res, SparseSeries)
expected = self.bseries.reindex(idx[::2])
tm.assert_sp_series_equal(res, expected)
res = self.bseries[:5]
assert isinstance(res, SparseSeries)
tm.assert_sp_series_equal(res, self.bseries.reindex(idx[:5]))
res = self.bseries[5:]
tm.assert_sp_series_equal(res, self.bseries.reindex(idx[5:]))
# negative indices
res = self.bseries[:-3]
tm.assert_sp_series_equal(res, self.bseries.reindex(idx[:-3]))
def test_take(self):
def _compare_with_dense(sp):
dense = sp.to_dense()
def _compare(idx):
dense_result = dense.take(idx).values
sparse_result = sp.take(idx)
assert isinstance(sparse_result, SparseSeries)
tm.assert_almost_equal(dense_result,
sparse_result.values.values)
_compare([1., 2., 3., 4., 5., 0.])
_compare([7, 2, 9, 0, 4])
_compare([3, 6, 3, 4, 7])
self._check_all(_compare_with_dense)
pytest.raises(Exception, self.bseries.take,
[0, len(self.bseries) + 1])
# Corner case
sp = SparseSeries(np.ones(10) * nan)
exp = pd.Series(np.repeat(nan, 5))
tm.assert_series_equal(sp.take([0, 1, 2, 3, 4]), exp)
def test_numpy_take(self):
sp = SparseSeries([1.0, 2.0, 3.0])
indices = [1, 2]
tm.assert_series_equal(np.take(sp, indices, axis=0).to_dense(),
np.take(sp.to_dense(), indices, axis=0))
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.take,
sp, indices, out=np.empty(sp.shape))
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.take,
sp, indices, mode='clip')
def test_setitem(self):
self.bseries[5] = 7.
assert self.bseries[5] == 7.
def test_setslice(self):
self.bseries[5:10] = 7.
tm.assert_series_equal(self.bseries[5:10].to_dense(),
Series(7., index=range(5, 10),
name=self.bseries.name))
def test_operators(self):
def _check_op(a, b, op):
sp_result = op(a, b)
adense = a.to_dense() if isinstance(a, SparseSeries) else a
bdense = b.to_dense() if isinstance(b, SparseSeries) else b
dense_result = op(adense, bdense)
tm.assert_almost_equal(sp_result.to_dense(), dense_result)
def check(a, b):
_check_op(a, b, operator.add)
_check_op(a, b, operator.sub)
_check_op(a, b, operator.truediv)
_check_op(a, b, operator.floordiv)
_check_op(a, b, operator.mul)
_check_op(a, b, lambda x, y: operator.add(y, x))
_check_op(a, b, lambda x, y: operator.sub(y, x))
_check_op(a, b, lambda x, y: operator.truediv(y, x))
_check_op(a, b, lambda x, y: operator.floordiv(y, x))
_check_op(a, b, lambda x, y: operator.mul(y, x))
# NaN ** 0 = 1 in C?
# _check_op(a, b, operator.pow)
# _check_op(a, b, lambda x, y: operator.pow(y, x))
check(self.bseries, self.bseries)
check(self.iseries, self.iseries)
check(self.bseries, self.iseries)
check(self.bseries, self.bseries2)
check(self.bseries, self.iseries2)
check(self.iseries, self.iseries2)
# scalar value
check(self.bseries, 5)
# zero-based
check(self.zbseries, self.zbseries * 2)
check(self.zbseries, self.zbseries2)
check(self.ziseries, self.ziseries2)
# with dense
result = self.bseries + self.bseries.to_dense()
tm.assert_sp_series_equal(result, self.bseries + self.bseries)
def test_binary_operators(self):
# skipping for now #####
import pytest
pytest.skip("skipping sparse binary operators test")
def _check_inplace_op(iop, op):
tmp = self.bseries.copy()
expected = op(tmp, self.bseries)
iop(tmp, self.bseries)
tm.assert_sp_series_equal(tmp, expected)
inplace_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow']
for op in inplace_ops:
_check_inplace_op(getattr(operator, "i%s" % op),
getattr(operator, op))
def test_abs(self):
s = SparseSeries([1, 2, -3], name='x')
expected = SparseSeries([1, 2, 3], name='x')
result = s.abs()
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
result = abs(s)
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
result = np.abs(s)
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
s = SparseSeries([1, -2, 2, -3], fill_value=-2, name='x')
expected = SparseSeries([1, 2, 3], sparse_index=s.sp_index,
fill_value=2, name='x')
result = s.abs()
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
result = abs(s)
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
result = np.abs(s)
tm.assert_sp_series_equal(result, expected)
assert result.name == 'x'
def test_reindex(self):
def _compare_with_series(sps, new_index):
spsre = sps.reindex(new_index)
series = sps.to_dense()
seriesre = series.reindex(new_index)
seriesre = seriesre.to_sparse(fill_value=sps.fill_value)
tm.assert_sp_series_equal(spsre, seriesre)
tm.assert_series_equal(spsre.to_dense(), seriesre.to_dense())
_compare_with_series(self.bseries, self.bseries.index[::2])
_compare_with_series(self.bseries, list(self.bseries.index[::2]))
_compare_with_series(self.bseries, self.bseries.index[:10])
_compare_with_series(self.bseries, self.bseries.index[5:])
_compare_with_series(self.zbseries, self.zbseries.index[::2])
_compare_with_series(self.zbseries, self.zbseries.index[:10])
_compare_with_series(self.zbseries, self.zbseries.index[5:])
# special cases
same_index = self.bseries.reindex(self.bseries.index)
tm.assert_sp_series_equal(self.bseries, same_index)
assert same_index is not self.bseries
# corner cases
sp = SparseSeries([], index=[])
# TODO: sp_zero is not used anywhere...remove?
sp_zero = SparseSeries([], index=[], fill_value=0) # noqa
_compare_with_series(sp, np.arange(10))
# with copy=False
reindexed = self.bseries.reindex(self.bseries.index, copy=True)
reindexed.sp_values[:] = 1.
assert (self.bseries.sp_values != 1.).all()
reindexed = self.bseries.reindex(self.bseries.index, copy=False)
reindexed.sp_values[:] = 1.
tm.assert_numpy_array_equal(self.bseries.sp_values, np.repeat(1., 10))
def test_sparse_reindex(self):
length = 10
def _check(values, index1, index2, fill_value):
first_series = SparseSeries(values, sparse_index=index1,
fill_value=fill_value)
reindexed = first_series.sparse_reindex(index2)
assert reindexed.sp_index is index2
int_indices1 = index1.to_int_index().indices
int_indices2 = index2.to_int_index().indices
expected = Series(values, index=int_indices1)
expected = expected.reindex(int_indices2).fillna(fill_value)
tm.assert_almost_equal(expected.values, reindexed.sp_values)
# make sure level argument asserts
# TODO: expected is not used anywhere...remove?
expected = expected.reindex(int_indices2).fillna(fill_value) # noqa
def _check_with_fill_value(values, first, second, fill_value=nan):
i_index1 = IntIndex(length, first)
i_index2 = IntIndex(length, second)
b_index1 = i_index1.to_block_index()
b_index2 = i_index2.to_block_index()
_check(values, i_index1, i_index2, fill_value)
_check(values, b_index1, b_index2, fill_value)
def _check_all(values, first, second):
_check_with_fill_value(values, first, second, fill_value=nan)
_check_with_fill_value(values, first, second, fill_value=0)
index1 = [2, 4, 5, 6, 8, 9]
values1 = np.arange(6.)
_check_all(values1, index1, [2, 4, 5])
_check_all(values1, index1, [2, 3, 4, 5, 6, 7, 8, 9])
_check_all(values1, index1, [0, 1])
_check_all(values1, index1, [0, 1, 7, 8, 9])
_check_all(values1, index1, [])
first_series = SparseSeries(values1,
sparse_index=IntIndex(length, index1),
fill_value=nan)
with tm.assert_raises_regex(TypeError,
'new index must be a SparseIndex'):
reindexed = first_series.sparse_reindex(0) # noqa
def test_repr(self):
# TODO: These aren't used
bsrepr = repr(self.bseries) # noqa
isrepr = repr(self.iseries) # noqa
def test_iter(self):
pass
def test_truncate(self):
pass
def test_fillna(self):
pass
def test_groupby(self):
pass
def test_reductions(self):
def _compare_with_dense(obj, op):
sparse_result = getattr(obj, op)()
series = obj.to_dense()
dense_result = getattr(series, op)()
assert sparse_result == dense_result
to_compare = ['count', 'sum', 'mean', 'std', 'var', 'skew']
def _compare_all(obj):
for op in to_compare:
_compare_with_dense(obj, op)
_compare_all(self.bseries)
self.bseries.sp_values[5:10] = np.NaN
_compare_all(self.bseries)
_compare_all(self.zbseries)
self.zbseries.sp_values[5:10] = np.NaN
_compare_all(self.zbseries)
series = self.zbseries.copy()
series.fill_value = 2
_compare_all(series)
nonna = Series(np.random.randn(20)).to_sparse()
_compare_all(nonna)
nonna2 = Series(np.random.randn(20)).to_sparse(fill_value=0)
_compare_all(nonna2)
def test_dropna(self):
sp = SparseSeries([0, 0, 0, nan, nan, 5, 6], fill_value=0)
sp_valid = sp.valid()
expected = sp.to_dense().valid()
expected = expected[expected != 0]
exp_arr = pd.SparseArray(expected.values, fill_value=0, kind='block')
tm.assert_sp_array_equal(sp_valid.values, exp_arr)
tm.assert_index_equal(sp_valid.index, expected.index)
assert len(sp_valid.sp_values) == 2
result = self.bseries.dropna()
expected = self.bseries.to_dense().dropna()
assert not isinstance(result, SparseSeries)
tm.assert_series_equal(result, expected)
def test_homogenize(self):
def _check_matches(indices, expected):
data = {}
for i, idx in enumerate(indices):
data[i] = SparseSeries(idx.to_int_index().indices,
sparse_index=idx, fill_value=np.nan)
# homogenized is only valid with NaN fill values
homogenized = spf.homogenize(data)
for k, v in compat.iteritems(homogenized):
assert (v.sp_index.equals(expected))
indices1 = [BlockIndex(10, [2], [7]), BlockIndex(10, [1, 6], [3, 4]),
BlockIndex(10, [0], [10])]
expected1 = BlockIndex(10, [2, 6], [2, 3])
_check_matches(indices1, expected1)
indices2 = [BlockIndex(10, [2], [7]), BlockIndex(10, [2], [7])]
expected2 = indices2[0]
_check_matches(indices2, expected2)
# must have NaN fill value
data = {'a': SparseSeries(np.arange(7), sparse_index=expected2,
fill_value=0)}
with tm.assert_raises_regex(TypeError, "NaN fill value"):
spf.homogenize(data)
def test_fill_value_corner(self):
cop = self.zbseries.copy()
cop.fill_value = 0
result = self.bseries / cop
assert np.isnan(result.fill_value)
cop2 = self.zbseries.copy()
cop2.fill_value = 1
result = cop2 / cop
# 1 / 0 is inf
assert np.isinf(result.fill_value)
def test_fill_value_when_combine_const(self):
# GH12723
s = SparseSeries([0, 1, np.nan, 3, 4, 5], index=np.arange(6))
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
def test_shift(self):
series = SparseSeries([nan, 1., 2., 3., nan, nan], index=np.arange(6))
shifted = series.shift(0)
assert shifted is not series
tm.assert_sp_series_equal(shifted, series)
f = lambda s: s.shift(1)
_dense_series_compare(series, f)
f = lambda s: s.shift(-2)
_dense_series_compare(series, f)
series = SparseSeries([nan, 1., 2., 3., nan, nan],
index=bdate_range('1/1/2000', periods=6))
f = lambda s: s.shift(2, freq='B')
_dense_series_compare(series, f)
f = lambda s: s.shift(2, freq=BDay())
_dense_series_compare(series, f)
def test_shift_nan(self):
# GH 12908
orig = pd.Series([np.nan, 2, np.nan, 4, 0, np.nan, 0])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.shift(0), orig.shift(0).to_sparse())
tm.assert_sp_series_equal(sparse.shift(1), orig.shift(1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(2), orig.shift(2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(3), orig.shift(3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-1), orig.shift(-1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-2), orig.shift(-2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-3), orig.shift(-3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-4), orig.shift(-4).to_sparse())
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.shift(0),
orig.shift(0).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(1),
orig.shift(1).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(2),
orig.shift(2).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(3),
orig.shift(3).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-1),
orig.shift(-1).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-2),
orig.shift(-2).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-3),
orig.shift(-3).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-4),
orig.shift(-4).to_sparse(fill_value=0))
def test_shift_dtype(self):
# GH 12908
orig = pd.Series([1, 2, 3, 4], dtype=np.int64)
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.shift(0), orig.shift(0).to_sparse())
sparse = orig.to_sparse(fill_value=np.nan)
tm.assert_sp_series_equal(sparse.shift(0),
orig.shift(0).to_sparse(fill_value=np.nan))
# shift(1) or more span changes dtype to float64
tm.assert_sp_series_equal(sparse.shift(1), orig.shift(1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(2), orig.shift(2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(3), orig.shift(3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-1), orig.shift(-1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-2), orig.shift(-2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-3), orig.shift(-3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-4), orig.shift(-4).to_sparse())
def test_shift_dtype_fill_value(self):
# GH 12908
orig = pd.Series([1, 0, 0, 4], dtype=np.int64)
for v in [0, 1, np.nan]:
sparse = orig.to_sparse(fill_value=v)
tm.assert_sp_series_equal(sparse.shift(0),
orig.shift(0).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(1),
orig.shift(1).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(2),
orig.shift(2).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(3),
orig.shift(3).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-1),
orig.shift(-1).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-2),
orig.shift(-2).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-3),
orig.shift(-3).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-4),
orig.shift(-4).to_sparse(fill_value=v))
def test_combine_first(self):
s = self.bseries
result = s[::2].combine_first(s)
result2 = s[::2].combine_first(s.to_dense())
expected = s[::2].to_dense().combine_first(s.to_dense())
expected = expected.to_sparse(fill_value=s.fill_value)
tm.assert_sp_series_equal(result, result2)
tm.assert_sp_series_equal(result, expected)
class TestSparseHandlingMultiIndexes(object):
def setup_method(self, method):
miindex = pd.MultiIndex.from_product(
[["x", "y"], ["10", "20"]], names=['row-foo', 'row-bar'])
micol = pd.MultiIndex.from_product(
[['a', 'b', 'c'], ["1", "2"]], names=['col-foo', 'col-bar'])
dense_multiindex_frame = pd.DataFrame(
index=miindex, columns=micol).sort_index().sort_index(axis=1)
self.dense_multiindex_frame = dense_multiindex_frame.fillna(value=3.14)
def test_to_sparse_preserve_multiindex_names_columns(self):
sparse_multiindex_frame = self.dense_multiindex_frame.to_sparse()
sparse_multiindex_frame = sparse_multiindex_frame.copy()
tm.assert_index_equal(sparse_multiindex_frame.columns,
self.dense_multiindex_frame.columns)
def test_round_trip_preserve_multiindex_names(self):
sparse_multiindex_frame = self.dense_multiindex_frame.to_sparse()
round_trip_multiindex_frame = sparse_multiindex_frame.to_dense()
tm.assert_frame_equal(self.dense_multiindex_frame,
round_trip_multiindex_frame,
check_column_type=True,
check_names=True)
class TestSparseSeriesScipyInteraction(object):
# Issue 8048: add SparseSeries coo methods
def setup_method(self, method):
tm._skip_if_no_scipy()
import scipy.sparse
# SparseSeries inputs used in tests; the tests rely on their order
self.sparse_series = []
s = pd.Series([3.0, nan, 1.0, 2.0, nan, nan])
s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),
(1, 2, 'a', 1),
(1, 1, 'b', 0),
(1, 1, 'b', 1),
(2, 1, 'b', 0),
(2, 1, 'b', 1)],
names=['A', 'B', 'C', 'D'])
self.sparse_series.append(s.to_sparse())
ss = self.sparse_series[0].copy()
ss.index.names = [3, 0, 1, 2]
self.sparse_series.append(ss)
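# third series: start all-NaN over a dense 3x4 MultiIndex, then fill three cells so it mirrors coo_matrices[0] below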
ss = pd.Series([
nan
] * 12, index=cartesian_product((range(3), range(4)))).to_sparse()
for k, v in zip([(0, 0), (1, 2), (1, 3)], [3.0, 1.0, 2.0]):
ss[k] = v
self.sparse_series.append(ss)
# results used in tests
self.coo_matrices = []
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([0, 1, 1], [0, 2, 3])), shape=(3, 4)))
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)))
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([0, 1, 1], [0, 0, 1])), shape=(3, 2)))
self.ils = [[(1, 2), (1, 1), (2, 1)], [(1, 1), (1, 2), (2, 1)],
[(1, 2, 'a'), (1, 1, 'b'), (2, 1, 'b')]]
self.jls = [[('a', 0), ('a', 1), ('b', 0), ('b', 1)], [0, 1]]
def test_to_coo_text_names_integer_row_levels_nosort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': [0, 1], 'column_levels': [2, 3]}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_integer_row_levels_sort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': [0, 1],
'column_levels': [2, 3],
'sort_labels': True}
result = (self.coo_matrices[1], self.ils[1], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_text_row_levels_nosort_col_level_single(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': ['A', 'B', 'C'],
'column_levels': ['D'],
'sort_labels': False}
result = (self.coo_matrices[2], self.ils[2], self.jls[1])
self._run_test(ss, kwargs, result)
def test_to_coo_integer_names_integer_row_levels_nosort(self):
ss = self.sparse_series[1]
kwargs = {'row_levels': [3, 0], 'column_levels': [1, 2]}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_text_row_levels_nosort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': ['A', 'B'], 'column_levels': ['C', 'D']}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_bad_partition_nonnull_intersection(self):
ss = self.sparse_series[0]
pytest.raises(ValueError, ss.to_coo, ['A', 'B', 'C'], ['C', 'D'])
def test_to_coo_bad_partition_small_union(self):
ss = self.sparse_series[0]
pytest.raises(ValueError, ss.to_coo, ['A'], ['C', 'D'])
def test_to_coo_nlevels_less_than_two(self):
ss = self.sparse_series[0]
ss.index = np.arange(len(ss.index))
pytest.raises(ValueError, ss.to_coo)
def test_to_coo_bad_ilevel(self):
ss = self.sparse_series[0]
pytest.raises(KeyError, ss.to_coo, ['A', 'B'], ['C', 'D', 'E'])
def test_to_coo_duplicate_index_entries(self):
ss = pd.concat([self.sparse_series[0],
self.sparse_series[0]]).to_sparse()
pytest.raises(ValueError, ss.to_coo, ['A', 'B'], ['C', 'D'])
def test_from_coo_dense_index(self):
ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=True)
check = self.sparse_series[2]
tm.assert_sp_series_equal(ss, check)
def test_from_coo_nodense_index(self):
ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=False)
check = self.sparse_series[2]
check = check.dropna().to_sparse()
tm.assert_sp_series_equal(ss, check)
def test_from_coo_long_repr(self):
# GH 13114
# test it doesn't raise error. Formatting is tested in test_format
tm._skip_if_no_scipy()
import scipy.sparse
sparse = SparseSeries.from_coo(scipy.sparse.rand(350, 18))
repr(sparse)
def _run_test(self, ss, kwargs, check):
results = ss.to_coo(**kwargs)
self._check_results_to_coo(results, check)
# for every test, also test symmetry property (transpose), switch
# row_levels and column_levels
d = kwargs.copy()
d['row_levels'] = kwargs['column_levels']
d['column_levels'] = kwargs['row_levels']
results = ss.to_coo(**d)
results = (results[0].T, results[2], results[1])
self._check_results_to_coo(results, check)
def _check_results_to_coo(self, results, check):
(A, il, jl) = results
(A_result, il_result, jl_result) = check
# convert to dense and compare
tm.assert_numpy_array_equal(A.todense(), A_result.todense())
# or compare directly as difference of sparse
# assert(abs(A - A_result).max() < 1e-12) # max is failing in python
# 2.6
assert il == il_result
assert jl == jl_result
def test_concat(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ['integer', 'block']:
sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, name='y', kind=kind)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
sparse1 = pd.SparseSeries(val1, fill_value=0, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, fill_value=0, name='y', kind=kind)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, fill_value=0, kind=kind)
tm.assert_sp_series_equal(res, exp)
def test_concat_axis1(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x')
sparse2 = pd.SparseSeries(val2, name='y')
res = pd.concat([sparse1, sparse2], axis=1)
exp = pd.concat([pd.Series(val1, name='x'),
pd.Series(val2, name='y')], axis=1)
exp = pd.SparseDataFrame(exp)
tm.assert_sp_frame_equal(res, exp)
def test_concat_different_fill(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ['integer', 'block']:
sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, name='y', kind=kind, fill_value=0)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_concat_axis1_different_fill(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x')
sparse2 = pd.SparseSeries(val2, name='y', fill_value=0)
res = pd.concat([sparse1, sparse2], axis=1)
exp = pd.concat([pd.Series(val1, name='x'),
pd.Series(val2, name='y')], axis=1)
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
def test_concat_different_kind(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x', kind='integer')
sparse2 = pd.SparseSeries(val2, name='y', kind='block', fill_value=0)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind='integer')
tm.assert_sp_series_equal(res, exp)
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind='block', fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_concat_sparse_dense(self):
# use first input's fill_value
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ['integer', 'block']:
sparse = pd.SparseSeries(val1, name='x', kind=kind)
dense = pd.Series(val2, name='y')
res = pd.concat([sparse, dense])
exp = pd.concat([pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([dense, sparse, dense])
exp = pd.concat([dense, pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
sparse = pd.SparseSeries(val1, name='x', kind=kind, fill_value=0)
dense = pd.Series(val2, name='y')
res = pd.concat([sparse, dense])
exp = pd.concat([pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([dense, sparse, dense])
exp = pd.concat([dense, pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_value_counts(self):
vals = [1, 2, nan, 0, nan, 1, 2, nan, nan, 1, 2, 0, 1, 1]
dense = pd.Series(vals, name='xx')
sparse = pd.SparseSeries(vals, name='xx')
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
sparse = pd.SparseSeries(vals, name='xx', fill_value=0)
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
def test_value_counts_dup(self):
vals = [1, 2, nan, 0, nan, 1, 2, nan, nan, 1, 2, 0, 1, 1]
# numeric op may cause sp_values to include the same value as
# fill_value
dense = pd.Series(vals, name='xx') / 0.
sparse = pd.SparseSeries(vals, name='xx') / 0.
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
vals = [1, 2, 0, 0, 0, 1, 2, 0, 0, 1, 2, 0, 1, 1]
dense = pd.Series(vals, name='xx') * 0.
sparse = pd.SparseSeries(vals, name='xx') * 0.
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
def test_value_counts_int(self):
vals = [1, 2, 0, 1, 2, 1, 2, 0, 1, 1]
dense = pd.Series(vals, name='xx')
# fill_value is np.nan, but should not be included in the result
sparse = pd.SparseSeries(vals, name='xx')
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
sparse = pd.SparseSeries(vals, name='xx', fill_value=0)
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
def test_isnull(self):
# GH 8276
s = pd.SparseSeries([np.nan, np.nan, 1, 2, np.nan], name='xxx')
res = s.isnull()
exp = pd.SparseSeries([True, True, False, False, True], name='xxx',
fill_value=True)
tm.assert_sp_series_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
s = pd.SparseSeries([np.nan, 0., 1., 2., 0.], name='xxx',
fill_value=0.)
res = s.isnull()
assert isinstance(res, pd.SparseSeries)
exp = pd.Series([True, False, False, False, False], name='xxx')
tm.assert_series_equal(res.to_dense(), exp)
def test_isnotnull(self):
# GH 8276
s = pd.SparseSeries([np.nan, np.nan, 1, 2, np.nan], name='xxx')
res = s.isnotnull()
exp = pd.SparseSeries([False, False, True, True, False], name='xxx',
fill_value=False)
tm.assert_sp_series_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
s = pd.SparseSeries([np.nan, 0., 1., 2., 0.], name='xxx',
fill_value=0.)
res = s.isnotnull()
assert isinstance(res, pd.SparseSeries)
exp = pd.Series([False, True, True, True, True], name='xxx')
tm.assert_series_equal(res.to_dense(), exp)
def _dense_series_compare(s, f):
result = f(s)
assert (isinstance(result, SparseSeries))
dense_result = f(s.to_dense())
tm.assert_series_equal(result.to_dense(), dense_result)
class TestSparseSeriesAnalytics(object):
def setup_method(self, method):
arr, index = _test_data1()
self.bseries = SparseSeries(arr, index=index, kind='block',
name='bseries')
arr, index = _test_data1_zero()
self.zbseries = SparseSeries(arr, index=index, kind='block',
fill_value=0, name='zbseries')
def test_cumsum(self):
result = self.bseries.cumsum()
expected = SparseSeries(self.bseries.to_dense().cumsum())
tm.assert_sp_series_equal(result, expected)
result = self.zbseries.cumsum()
expected = self.zbseries.to_dense().cumsum()
tm.assert_series_equal(result, expected)
axis = 1 # Series is 1-D, so only axis = 0 is valid.
msg = "No axis named {axis}".format(axis=axis)
with tm.assert_raises_regex(ValueError, msg):
self.bseries.cumsum(axis=axis)
def test_numpy_cumsum(self):
result = np.cumsum(self.bseries)
expected = SparseSeries(self.bseries.to_dense().cumsum())
tm.assert_sp_series_equal(result, expected)
result = np.cumsum(self.zbseries)
expected = self.zbseries.to_dense().cumsum()
tm.assert_series_equal(result, expected)
msg = "the 'dtype' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.cumsum,
self.bseries, dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.cumsum,
self.zbseries, out=result)
def test_numpy_func_call(self):
# no exception should be raised even though
# numpy passes in 'axis=None' or 'axis=-1'
funcs = ['sum', 'cumsum', 'var', 'mean',
'prod', 'cumprod', 'std', 'argsort',
'argmin', 'argmax', 'min', 'max']
for func in funcs:
for series in ('bseries', 'zbseries'):
getattr(np, func)(getattr(self, series))
| mit |
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/factorization/python/ops/gmm_test.py | 44 | 8747 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
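# otherwise sample a random batch of row indices (with replacement) and gather those points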
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments, self.scores = self.make_random_points(
self.true_centers, self.num_points)
self.true_score = np.add.reduce(self.scores)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
means = [
np.mean(
points[assignments == center], axis=0)
for center in xrange(num_centers)
]
covs = [
np.cov(points[assignments == center].T)
for center in xrange(num_centers)
]
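# score each point by its Mahalanobis distance from the empirical mean/covariance of its assigned cluster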
scores = []
for r in xrange(num_points):
scores.append(
np.sqrt(
np.dot(
np.dot(points[r, :] - means[assignments[r]],
np.linalg.inv(covs[assignments[r]])), points[r, :] -
means[assignments[r]])))
return (points, assignments, scores)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertGreater(score1, score2)
self.assertNear(self.true_score, score2, self.true_score * 0.15)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments, true_offsets = (
self.make_random_points(clusters, num_points))
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
# Test score
score = gmm.score(input_fn=self.input_fn(points=points,
batch_size=num_points), steps=1)
self.assertNear(score, np.sum(true_offsets), 4.05)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
if __name__ == '__main__':
test.main()
| mit |
michalkurka/h2o-3 | h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_binomial_large.py | 2 | 116214 | from __future__ import print_function
import sys
sys.path.insert(1, "../../../")
import random
import os
import math
import numpy as np
import h2o
import time
from builtins import range
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.grid.grid_search import H2OGridSearch
from scipy import stats
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
class TestGLMBinomial:
"""
This class is created to test the GLM algo with Binomial family. In this case, the relationship
between the response Y and predictor vector X is assumed to be
Prob(Y = 1|X) = exp(W^T * X + E)/(1+exp(W^T * X + E)) where E is unknown Gaussian noise. We
generate random data set using the exact formula. To evaluate the H2O GLM Model, we run the sklearn
logistic regression with the same data sets and compare the performance of the two. If they are close
enough within a certain tolerance, we declare the H2O model working. When regularization and other
parameters are enabled, we can evaluate H2O GLM model performance by comparing the logloss/accuracy
of the H2O model to those of the H2O model generated without regularization. As long as they do not deviate
too much, we consider the H2O model performance satisfactory.
In particular, I have written 7 tests in the hope of exercising as many parameter settings of the GLM
algo with Binomial distribution as possible. Tomas has requested 2 tests to be added to test his new
feature of missing_values_handling with predictors with both categorical/real columns. Here is a list
of all tests descriptions:
test1_glm_no_regularization(): sklearn logistic regression model is built.
H2O GLM is built for Binomial family with the same random data sets. We observe
the weights, confusion matrices from the two models. We compare the logloss, prediction
accuracy from the two models to determine if H2O GLM model shall pass the test.
test2_glm_lambda_search(): test lambda search with alpha set to 0.5 per Tomas's
suggestion. Make sure the logloss and prediction accuracy generated here are comparable in
value to H2O GLM with no regularization.
test3_glm_grid_search_over_params(): test grid search over
various alpha values while lambda is set to be the best value obtained
from test 2. Cross validation with k=5 and random assignment is enabled
as well. The best model performance hopefully will generate logloss and
prediction accuracies close to H2O with no regularization in test 1.
test4_glm_remove_collinear_columns(): test parameter remove_collinear_columns=True
with lambda set to best lambda from test 2, alpha set to best alpha from Gridsearch
and solver set to the one which generate the smallest validation logloss. The same dataset
is used here except that we randomly choose predictor columns to repeat and scale.
Make sure logloss and prediction accuracies generated here is comparable in value
to H2O GLM model with no regularization.
test5_missing_values(): Test parameter missing_values_handling="MeanImputation" with
only real value predictors. The same data sets as before is used. However, we
go into the predictor matrix and randomly decide to replace a value with
nan and create missing values. Sklearn logistic regression model is built using the
data set where we have imputed the missing values. This Sklearn model will be used to
compare our H2O models with.
test6_enum_missing_values(): Test parameter missing_values_handling="MeanImputation" with
mixed predictors (categorical/real value columns). We first generate a data set that
contains a random number of columns of categorical and real value columns. Next, we
encode the categorical columns. Then, we generate the random data set using the formula
as before. Next, we go into the predictor matrix and randomly
decide to change a value to be nan and create missing values. Again, we build a Sklearn
logistic regression model and compare our H2O model with it.
test7_missing_enum_values_lambda_search(): Test parameter
missing_values_handling="MeanImputation" with mixed predictors (categorical/real value columns)
and lambda search set to True.
We use the same prediction data with missing values from test6. Next, we encode the categorical columns using
true one hot encoding since Lambda-search will be enabled with alpha set to 0.5. Since the encoding
is different in this case from test6, we will build a brand new Sklearn logistic regression model and
compare the best H2O model logloss/prediction accuracy with it.
"""
# parameters set by users, change with care
max_col_count = 50 # set maximum values of train/test row and column counts
max_col_count_ratio = 500 # set max row count to be multiples of col_count to avoid overfitting
min_col_count_ratio = 100 # set min row count to be multiples of col_count to avoid overfitting
###### for debugging
# max_col_count = 5 # set maximum values of train/test row and column counts
# max_col_count_ratio = 50 # set max row count to be multiples of col_count to avoid overfitting
# min_col_count_ratio = 10
max_p_value = 2 # set maximum predictor value
min_p_value = -2 # set minimum predictor value
max_w_value = 2 # set maximum weight value
min_w_value = -2 # set minimum weight value
enum_levels = 5 # maximum number of levels for categorical variables not counting NAs
class_method = 'probability' # can be 'probability' or 'threshold', control how discrete response is generated
test_class_method = 'probability' # for test data set
margin = 0.0 # only used when class_method = 'threshold'
test_class_margin = 0.2 # for test data set
family = 'binomial' # this test is for Binomial GLM
curr_time = str(round(time.time()))
# parameters denoting filenames of interested that store training/validation/test data sets
training_filename = family+"_"+curr_time+"_training_set.csv"
training_filename_duplicate = family+"_"+curr_time+"_training_set_duplicate.csv"
training_filename_nans = family+"_"+curr_time+"_training_set_NA.csv"
training_filename_enum = family+"_"+curr_time+"_training_set_enum.csv"
training_filename_enum_true_one_hot = family+"_"+curr_time+"_training_set_enum_trueOneHot.csv"
training_filename_enum_nans = family+"_"+curr_time+"_training_set_enum_NAs.csv"
training_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_training_set_enum_NAs_trueOneHot.csv"
validation_filename = family+"_"+curr_time+"_validation_set.csv"
validation_filename_enum = family+"_"+curr_time+"_validation_set_enum.csv"
validation_filename_enum_true_one_hot = family+"_"+curr_time+"_validation_set_enum_trueOneHot.csv"
validation_filename_enum_nans = family+"_"+curr_time+"_validation_set_enum_NAs.csv"
validation_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_validation_set_enum_NAs_trueOneHot.csv"
test_filename = family+"_"+curr_time+"_test_set.csv"
test_filename_duplicate = family+"_"+curr_time+"_test_set_duplicate.csv"
test_filename_nans = family+"_"+curr_time+"_test_set_NA.csv"
test_filename_enum = family+"_"+curr_time+"_test_set_enum.csv"
test_filename_enum_true_one_hot = family+"_"+curr_time+"_test_set_enum_trueOneHot.csv"
test_filename_enum_nans = family+"_"+curr_time+"_test_set_enum_NAs.csv"
test_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_test_set_enum_NAs_trueOneHot.csv"
weight_filename = family+"_"+curr_time+"_weight.csv"
weight_filename_enum = family+"_"+curr_time+"_weight_enum.csv"
total_test_number = 7 # total number of tests being run for GLM Binomial family
ignored_eps = 1e-15 # if p-values are smaller than this value, no comparison is performed, only for Gaussian
allowed_diff = 0.1 # tolerance for comparing logloss/prediction accuracy; okay to be loose since the
# conditions under which the two models are run differ
duplicate_col_counts = 5 # maximum number of times to duplicate a column
duplicate_threshold = 0.2 # for each column, a coin is tossed to see if we duplicate that column or not
duplicate_max_scale = 2 # maximum scale factor for duplicated columns
nan_fraction = 0.2 # denote maximum fraction of NA's to be inserted into a column
# System parameters, do not change. Dire consequences may follow if you do
current_dir = os.path.dirname(os.path.realpath(sys.argv[0])) # directory of this test file
enum_col = 0 # number of categorical columns in the predictors, randomly generated later
enum_level_vec = [] # vector containing number of levels for each categorical column
noise_std = 0 # noise variance in Binomial noise generation added to response
train_row_count = 0 # training data row count, randomly generated later
train_col_count = 0 # training data column count, randomly generated later
class_number = 2 # actual number of classes existed in data set, randomly generated later
data_type = 2 # determine data type of data set and weight, 1: integers, 2: real
# parameters denoting filenames with absolute paths
training_data_file = os.path.join(current_dir, training_filename)
training_data_file_duplicate = os.path.join(current_dir, training_filename_duplicate)
training_data_file_nans = os.path.join(current_dir, training_filename_nans)
training_data_file_enum = os.path.join(current_dir, training_filename_enum)
training_data_file_enum_true_one_hot = os.path.join(current_dir, training_filename_enum_true_one_hot)
training_data_file_enum_nans = os.path.join(current_dir, training_filename_enum_nans)
training_data_file_enum_nans_true_one_hot = os.path.join(current_dir, training_filename_enum_nans_true_one_hot)
validation_data_file = os.path.join(current_dir, validation_filename)
validation_data_file_enum = os.path.join(current_dir, validation_filename_enum)
validation_data_file_enum_true_one_hot = os.path.join(current_dir, validation_filename_enum_true_one_hot)
validation_data_file_enum_nans = os.path.join(current_dir, validation_filename_enum_nans)
validation_data_file_enum_nans_true_one_hot = os.path.join(current_dir, validation_filename_enum_nans_true_one_hot)
test_data_file = os.path.join(current_dir, test_filename)
test_data_file_duplicate = os.path.join(current_dir, test_filename_duplicate)
test_data_file_nans = os.path.join(current_dir, test_filename_nans)
test_data_file_enum = os.path.join(current_dir, test_filename_enum)
test_data_file_enum_true_one_hot = os.path.join(current_dir, test_filename_enum_true_one_hot)
test_data_file_enum_nans = os.path.join(current_dir, test_filename_enum_nans)
test_data_file_enum_nans_true_one_hot = os.path.join(current_dir, test_filename_enum_nans_true_one_hot)
weight_data_file = os.path.join(current_dir, weight_filename)
weight_data_file_enum = os.path.join(current_dir, weight_filename_enum)
# store template model performance values for later comparison
test1_model = None # store template model for later comparison
test1_model_metrics = None # store template model test metrics for later comparison
best_lambda = 0.0 # store best lambda obtained using lambda search
test_name = "pyunit_glm_binomial.py" # name of this test
sandbox_dir = "" # sandbox directory where we are going to save our failed test data sets
# store information about training data set, validation and test data sets that are used
# by many tests. We do not want to keep loading them for each set in the hope of
# saving time. Trading off memory and speed here.
x_indices = [] # store predictor indices in the data set
y_index = [] # store response index in the data set
training_data = [] # store training data set
test_data = [] # store test data set
valid_data = [] # store validation data set
training_data_grid = [] # store combined training and validation data set for cross validation
best_alpha = 0.5 # store best alpha value found
best_grid_logloss = -1 # store lowest validation logloss found from grid search
test_failed_array = [0]*total_test_number # denote test results for all tests run. 1 error, 0 pass
test_num = 0 # index representing which test is being run
duplicate_col_indices = [] # denote column indices when column duplication is applied
duplicate_col_scales = [] # store scaling factor for all columns when duplication is applied
noise_var = noise_std*noise_std # Binomial noise variance
test_failed = 0 # count total number of tests that have failed
sklearn_class_weight = {} # used to make sure Sklearn will know the correct number of classes
def __init__(self):
self.setup()
def setup(self):
"""
This function performs all initializations necessary:
1. generates all the random values for our dynamic tests like the Binomial
noise std, column count and row count for training data set;
2. generate the training/validation/test data sets with only real values;
3. insert missing values into training/valid/test data sets.
4. taking the training/valid/test data sets, randomly duplicate certain columns;
each duplicated column is repeated a random number of times and randomly scaled;
5. generate the training/validation/test data sets with predictors containing enum
and real values as well***.
6. insert missing values into the training/validation/test data sets with predictors
containing enum and real values as well
*** according to Tomas, when working with mixed predictors (contains both enum/real
value columns), the encoding used is different when regularization is enabled or disabled.
When regularization is enabled, true one hot encoding is enabled to encode the enum
values to binary bits. When regularization is disabled, a reference level plus one hot encoding
is enabled when encoding the enum values to binary bits. One data set is generated
when we work with mixed predictors.
"""
# clean out the sandbox directory first
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# randomly set Binomial noise standard deviation as a fraction of actual predictor standard deviation
self.noise_std = random.uniform(0, math.sqrt(pow((self.max_p_value - self.min_p_value), 2) / 12))
self.noise_var = self.noise_std*self.noise_std
# randomly determine data set size in terms of column and row counts
self.train_col_count = random.randint(3, self.max_col_count) # account for enum columns later
self.train_row_count = int(round(self.train_col_count*random.uniform(self.min_col_count_ratio,
self.max_col_count_ratio)))
# # DEBUGGING setup_data, remember to comment them out once done.
# self.train_col_count = 3
# self.train_row_count = 500
# end DEBUGGING
# randomly set number of enum and real columns in the data set
self.enum_col = random.randint(1, self.train_col_count-1)
# randomly set number of levels for each categorical column
self.enum_level_vec = np.random.random_integers(2, self.enum_levels-1, [self.enum_col, 1])
# generate real value weight vector and training/validation/test data sets for GLM
pyunit_utils.write_syn_floating_point_dataset_glm(self.training_data_file,
self.validation_data_file,
self.test_data_file, self.weight_data_file,
self.train_row_count, self.train_col_count, self.data_type,
self.max_p_value, self.min_p_value, self.max_w_value,
self.min_w_value, self.noise_std, self.family,
self.train_row_count, self.train_row_count,
class_number=self.class_number,
class_method=[self.class_method, self.class_method,
self.test_class_method],
class_margin=[self.margin, self.margin,
self.test_class_margin])
# randomly generate the duplicated and scaled columns
(self.duplicate_col_indices, self.duplicate_col_scales) = \
pyunit_utils.random_col_duplication(self.train_col_count, self.duplicate_threshold,
self.duplicate_col_counts, True, self.duplicate_max_scale)
# apply the duplication and scaling to training and test set
# need to add the response column to the end of duplicated column indices and scale
dup_col_indices = self.duplicate_col_indices
dup_col_indices.append(self.train_col_count)
dup_col_scale = self.duplicate_col_scales
dup_col_scale.append(1.0)
# print out duplication information for easy debugging
print("duplicated column indices and scales are: ")
print(dup_col_indices)
print(dup_col_scale)
pyunit_utils.duplicate_scale_cols(dup_col_indices, dup_col_scale, self.training_data_file,
self.training_data_file_duplicate)
pyunit_utils.duplicate_scale_cols(dup_col_indices, dup_col_scale, self.test_data_file,
self.test_data_file_duplicate)
# insert NAs into training/test data sets
pyunit_utils.insert_nan_in_data(self.training_data_file, self.training_data_file_nans, self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.test_data_file, self.test_data_file_nans, self.nan_fraction)
# generate data sets with enum as well as real values
pyunit_utils.write_syn_mixed_dataset_glm(self.training_data_file_enum,
self.training_data_file_enum_true_one_hot,
self.validation_data_file_enum,
self.validation_data_file_enum_true_one_hot,
self.test_data_file_enum, self.test_data_file_enum_true_one_hot,
self.weight_data_file_enum, self.train_row_count, self.train_col_count,
self.max_p_value, self.min_p_value, self.max_w_value, self.min_w_value,
self.noise_std, self.family, self.train_row_count,
self.train_row_count, self.enum_col, self.enum_level_vec,
class_number=self.class_number,
class_method=[self.class_method,
self.class_method,
self.test_class_method],
class_margin=[self.margin, self.margin, self.test_class_margin])
# insert NAs into data set with categorical columns
pyunit_utils.insert_nan_in_data(self.training_data_file_enum, self.training_data_file_enum_nans,
self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.validation_data_file_enum, self.validation_data_file_enum_nans,
self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.test_data_file_enum, self.test_data_file_enum_nans,
self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.training_data_file_enum_true_one_hot,
self.training_data_file_enum_nans_true_one_hot, self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.validation_data_file_enum_true_one_hot,
self.validation_data_file_enum_nans_true_one_hot, self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.test_data_file_enum_true_one_hot,
self.test_data_file_enum_nans_true_one_hot,
self.nan_fraction)
# only preload data sets that will be used for multiple tests and change the response to enums
self.training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file))
# set indices for response and predictor columns in data set for H2O GLM model to use
self.y_index = self.training_data.ncol-1
self.x_indices = list(range(self.y_index))
# added the round() so that this will work on win8.
self.training_data[self.y_index] = self.training_data[self.y_index].round().asfactor()
# check to make sure all response classes are represented, otherwise, quit
if self.training_data[self.y_index].nlevels()[0] < self.class_number:
print("Response classes are not represented in training dataset.")
sys.exit(0)
self.valid_data = h2o.import_file(pyunit_utils.locate(self.validation_data_file))
self.valid_data[self.y_index] = self.valid_data[self.y_index].round().asfactor()
self.test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file))
self.test_data[self.y_index] = self.test_data[self.y_index].round().asfactor()
# make a bigger training set for grid search by combining data from validation data set
self.training_data_grid = self.training_data.rbind(self.valid_data)
# set up sklearn class weights of all ones; used only to make sure sklearn knows the correct number of classes
for ind in range(self.class_number):
self.sklearn_class_weight[ind] = 1.0
# save the training data files just in case the code crashed.
pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)
def teardown(self):
"""
This function performs teardown after the dynamic test is completed. If all tests
passed, it will delete all generated data sets since they can be quite large. Otherwise, it
will move the training/validation/test data sets into a Rsandbox directory so that
we can re-run the failed tests.
"""
remove_files = []
# create Rsandbox directory to keep data sets and weight information
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# Do not want to save all data sets. Only save data sets that are needed for failed tests
if sum(self.test_failed_array[0:4]):
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
pyunit_utils.move_files(self.sandbox_dir, self.validation_data_file, self.validation_filename)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
else: # remove those files instead of moving them
remove_files.append(self.training_data_file)
remove_files.append(self.validation_data_file)
remove_files.append(self.test_data_file)
if sum(self.test_failed_array[0:6]):
pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file, self.weight_filename)
else:
remove_files.append(self.weight_data_file)
if self.test_failed_array[3]:
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_duplicate, self.test_filename_duplicate)
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_duplicate,
self.training_filename_duplicate)
else:
remove_files.append(self.training_data_file_duplicate)
remove_files.append(self.test_data_file_duplicate)
if self.test_failed_array[4]:
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_nans, self.training_filename_nans)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_nans, self.test_filename_nans)
else:
remove_files.append(self.training_data_file_nans)
remove_files.append(self.test_data_file_nans)
if self.test_failed_array[5]:
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_enum_nans,
self.training_filename_enum_nans)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_enum_nans, self.test_filename_enum_nans)
pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file_enum, self.weight_filename_enum)
else:
remove_files.append(self.training_data_file_enum_nans)
remove_files.append(self.training_data_file_enum)
remove_files.append(self.test_data_file_enum_nans)
remove_files.append(self.test_data_file_enum)
remove_files.append(self.validation_data_file_enum_nans)
remove_files.append(self.validation_data_file_enum)
remove_files.append(self.weight_data_file_enum)
if self.test_failed_array[6]:
pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_enum_nans_true_one_hot,
self.training_filename_enum_nans_true_one_hot)
pyunit_utils.move_files(self.sandbox_dir, self.validation_data_file_enum_nans_true_one_hot,
self.validation_filename_enum_nans_true_one_hot)
pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_enum_nans_true_one_hot,
self.test_filename_enum_nans_true_one_hot)
pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file_enum, self.weight_filename_enum)
else:
remove_files.append(self.training_data_file_enum_nans_true_one_hot)
remove_files.append(self.training_data_file_enum_true_one_hot)
remove_files.append(self.validation_data_file_enum_nans_true_one_hot)
remove_files.append(self.validation_data_file_enum_true_one_hot)
remove_files.append(self.test_data_file_enum_nans_true_one_hot)
remove_files.append(self.test_data_file_enum_true_one_hot)
if not(self.test_failed): # all tests have passed. Delete sandbox if it was not wiped before
pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, False)
# only remove the csv files generated by this test (tracked in remove_files); do not sweep the whole test directory since it may sit on shared computing resources
if len(remove_files) > 0:
for file in remove_files:
pyunit_utils.remove_files(file)
def test1_glm_no_regularization(self):
"""
In this test, a sklearn logistic regression model and a H2O GLM are built for Binomial family with the same
random data sets. We observe the weights, confusion matrices from the two models. We compare the logloss,
prediction accuracy from the two models to determine if H2O GLM model shall pass the test.
"""
print("*******************************************************************************************")
print("Test1: build H2O GLM with Binomial with no regularization.")
h2o.cluster_info()
# training result from python Sklearn logistic regression model
(p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
self.sklearn_binomial_result(self.training_data_file, self.test_data_file, False, False)
# build our H2O model
self.test1_model = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0)
self.test1_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data)
# calculate test metrics
self.test1_model_metrics = self.test1_model.model_performance(test_data=self.test_data)
num_test_failed = self.test_failed # used to determine if the current test has failed
# print out comparison results for weight/logloss/prediction accuracy
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(self.test1_model,
self.test1_model_metrics,
self.family, "\nTest1 Done!",
compare_att_str=[
"\nComparing intercept and "
"weights ....",
"\nComparing logloss from training "
"dataset ....",
"\nComparing logloss from"
" test dataset ....",
"\nComparing confusion matrices from "
"training dataset ....",
"\nComparing confusion matrices from "
"test dataset ...",
"\nComparing accuracy from training "
"dataset ....",
"\nComparing accuracy from test "
"dataset ...."],
h2o_att_str=[
"H2O intercept and weights: \n",
"H2O logloss from training dataset: ",
"H2O logloss from test dataset",
"H2O confusion matrix from training "
"dataset: \n",
"H2O confusion matrix from test"
" dataset: \n",
"H2O accuracy from training dataset: ",
"H2O accuracy from test dataset: "],
template_att_str=[
"Sklearn intercept and weights: \n",
"Sklearn logloss from training "
"dataset: ",
"Sklearn logloss from test dataset: ",
"Sklearn confusion matrix from"
" training dataset: \n",
"Sklearn confusion matrix from test "
"dataset: \n",
"Sklearn accuracy from training "
"dataset: ",
"Sklearn accuracy from test "
"dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ "
"too much!",
"Logloss from test dataset differ too "
"much!", "", "",
"Accuracies from training dataset "
"differ too much!",
"Accuracies from test dataset differ "
"too much!"],
att_str_success=[
"Intercept and weights are close"
" enough!",
"Logloss from training dataset are "
"close enough!",
"Logloss from test dataset are close "
"enough!", "", "",
"Accuracies from training dataset are "
"close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
template_params=[
p_weights, p_logloss_train, p_cm_train,
p_accuracy_training, p_logloss_test,
p_cm_test, p_accuracy_test],
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test1_glm_no_regularization",
num_test_failed, self.test_failed)
self.test_num += 1 # update test index
def test2_glm_lambda_search(self):
"""
This test is used to test the lambda search. Recall that lambda search enables efficient and
automatic search for the optimal value of the lambda parameter. When lambda search is enabled,
GLM will first fit a model with maximum regularization and then keep decreasing it until
over-fitting occurs. The resulting model is based on the best lambda value. According to Tomas,
set alpha = 0.5 and enable validation but not cross-validation.
"""
print("*******************************************************************************************")
print("Test2: tests the lambda search.")
h2o.cluster_info()
# generate H2O model with lambda search enabled
model_h2o_0p5 = H2OGeneralizedLinearEstimator(family=self.family, lambda_search=True, alpha=0.5,
lambda_min_ratio=1e-20)
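# lambda_min_ratio sets how small the last lambda on the search path may be relative to lambda_max;
# a tiny value lets the path approach the unregularized fit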
model_h2o_0p5.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data,
validation_frame=self.valid_data)
# get best lambda here
self.best_lambda = pyunit_utils.get_train_glm_params(model_h2o_0p5, 'best_lambda')
# get test performance here
h2o_model_0p5_test_metrics = model_h2o_0p5.model_performance(test_data=self.test_data)
num_test_failed = self.test_failed
# print out comparison results for our H2O GLM and test1 H2O model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o_0p5, h2o_model_0p5_test_metrics,
self.family, "\nTest2 Done!",
test_model=self.test1_model,
test_model_metric=self.test1_model_metrics,
compare_att_str=[
"\nComparing intercept and"
" weights ....",
"\nComparing logloss from training "
"dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from "
"training dataset ....",
"\nComparing confusion matrices from "
"test dataset ...",
"\nComparing accuracy from training "
"dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O lambda search intercept and "
"weights: \n",
"H2O lambda search logloss from"
" training dataset: ",
"H2O lambda search logloss from test "
"dataset",
"H2O lambda search confusion matrix "
"from training dataset: \n",
"H2O lambda search confusion matrix "
"from test dataset: \n",
"H2O lambda search accuracy from "
"training dataset: ",
"H2O lambda search accuracy from test"
" dataset: "],
template_att_str=[
"H2O test1 template intercept and"
" weights: \n",
"H2O test1 template logloss from "
"training dataset: ",
"H2O test1 template logloss from "
"test dataset: ",
"H2O test1 template confusion"
" matrix from training dataset: \n",
"H2O test1 template confusion"
" matrix from test dataset: \n",
"H2O test1 template accuracy from "
"training dataset: ",
"H2O test1 template accuracy from"
" test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ "
"too much!",
"Logloss from test dataset differ too"
" much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close "
"enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, False, True, True, True,
True, True],
just_print=[True, False, False, True, True,
True, False],
failed_test_number=self.test_failed,
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test2_glm_lambda_search",
num_test_failed, self.test_failed)
self.test_num += 1
def test3_glm_grid_search(self):
"""
This test is used to test GridSearch with the following parameters:
1. Lambda = best_lambda value from test2
2. alpha = [0 0.5 0.99]
3. cross-validation with k = 5, fold_assignment = "Random"
We will look at the best results from the grid search and compare it with H2O model built in test 1.
:return: None
"""
print("*******************************************************************************************")
print("Test3: explores various parameter settings in training the GLM using GridSearch using solver ")
h2o.cluster_info()
hyper_parameters = {'alpha': [0, 0.5, 0.99]} # set hyper_parameters for grid search
# train H2O GLM model with grid search
model_h2o_gridsearch = \
H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, Lambda=self.best_lambda, nfolds=5,
fold_assignment='Random'), hyper_parameters)
model_h2o_gridsearch.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data_grid)
# print out the model sequence ordered by the best validation logloss values, thanks Ludi!
temp_model = model_h2o_gridsearch.sort_by("logloss(xval=True)")
# obtain the model ID of the best model (with the smallest validation logloss) and use that for our evaluation
best_model_id = temp_model['Model Id'][0]
self.best_grid_logloss = temp_model['logloss(xval=True)'][0]
self.best_alpha = model_h2o_gridsearch.get_hyperparams(best_model_id)
best_model = h2o.get_model(best_model_id)
best_model_test_metrics = best_model.model_performance(test_data=self.test_data)
num_test_failed = self.test_failed
# print out comparison results for our H2O GLM with H2O model from test 1
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(best_model, best_model_test_metrics,
self.family,
"\nTest3 " + " Done!",
test_model=self.test1_model,
test_model_metric=self.test1_model_metrics,
compare_att_str=[
"\nComparing intercept and"
" weights ....",
"\nComparing logloss from training "
"dataset ....",
"\nComparing logloss from test dataset"
" ....",
"\nComparing confusion matrices from "
"training dataset ....",
"\nComparing confusion matrices from "
"test dataset ...",
"\nComparing accuracy from training "
"dataset ....",
"\nComparing accuracy from test "
" sdataset ...."],
h2o_att_str=[
"H2O grid search intercept and "
"weights: \n",
"H2O grid search logloss from training"
" dataset: ",
"H2O grid search logloss from test "
"dataset",
"H2O grid search confusion matrix from"
" training dataset: \n",
"H2O grid search confusion matrix from"
" test dataset: \n",
"H2O grid search accuracy from"
" training dataset: ",
"H2O grid search accuracy from test "
"dataset: "],
template_att_str=[
"H2O test1 template intercept and"
" weights: \n",
"H2O test1 template logloss from"
" training dataset: ",
"H2O test1 template logloss from"
" test dataset: ",
"H2O test1 template confusion"
" matrix from training dataset: \n",
"H2O test1 template confusion"
" matrix from test dataset: \n",
"H2O test1 template accuracy from"
" training dataset: ",
"H2O test1 template accuracy from"
" test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ"
" too much!",
"Logloss from test dataset differ too"
" much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close"
" enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[
True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
        self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test3_glm_grid_search",
num_test_failed, self.test_failed)
self.test_num += 1
def test4_glm_remove_collinear_columns(self):
"""
        With the best parameters obtained from the test 3 grid search, we train a GLM
        on data with duplicated columns, enable remove_collinear_columns, and check whether
        the algorithm catches the duplicated columns.  We then compare the results with the
        test 1 results.
"""
print("*******************************************************************************************")
print("Test4: test the GLM remove_collinear_columns.")
h2o.cluster_info()
# read in training data sets with duplicated columns
training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_duplicate))
test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_duplicate))
y_index = training_data.ncol-1
x_indices = list(range(y_index))
# change response variable to be categorical
training_data[y_index] = training_data[y_index].round().asfactor()
test_data[y_index] = test_data[y_index].round().asfactor()
# train H2O model with remove_collinear_columns=True
model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=self.best_lambda, alpha=self.best_alpha,
remove_collinear_columns=True)
model_h2o.train(x=x_indices, y=y_index, training_frame=training_data)
print("Best lambda is {0}, best alpha is {1}".format(self.best_lambda, self.best_alpha))
# evaluate model over test data set
model_h2o_metrics = model_h2o.model_performance(test_data=test_data)
num_test_failed = self.test_failed
# print out comparison results our H2O GLM and test1 H2O model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o, model_h2o_metrics, self.family,
"\nTest3 Done!",
test_model=self.test1_model,
test_model_metric=self.test1_model_metrics,
compare_att_str=[
"\nComparing intercept and weights"
" ....",
"\nComparing logloss from training "
"dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from"
" training dataset ....",
"\nComparing confusion matrices from"
" test dataset ...",
"\nComparing accuracy from training"
" dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O remove_collinear_columns "
"intercept and weights: \n",
"H2O remove_collinear_columns"
" logloss from training dataset: ",
"H2O remove_collinear_columns"
" logloss from test dataset",
"H2O remove_collinear_columns"
" confusion matrix from "
"training dataset: \n",
"H2O remove_collinear_columns"
" confusion matrix from"
" test dataset: \n",
"H2O remove_collinear_columns"
" accuracy from"
" training dataset: ",
"H2O remove_collinear_columns"
" accuracy from test"
" dataset: "],
template_att_str=[
"H2O test1 template intercept and"
" weights: \n",
"H2O test1 template logloss from"
" training dataset: ",
"H2O test1 template logloss from"
" test dataset: ",
"H2O test1 template confusion"
" matrix from training dataset: \n",
"H2O test1 template confusion"
" matrix from test dataset: \n",
"H2O test1 template accuracy from"
" training dataset: ",
"H2O test1 template accuracy from"
" test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ"
" too much!",
"Logloss from test dataset differ too"
" much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close"
" enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test4_glm_remove_collinear_columns",
num_test_failed, self.test_failed)
self.test_num += 1
def test5_missing_values(self):
"""
Test parameter missing_values_handling="MeanImputation" with
        only real value predictors.  The same data sets as before are used.  However, we
        go into the predictor matrix and randomly replace some values with
        nan to create missing values.  A Sklearn logistic regression model is built using the
        data set where we have imputed the missing values.  This Sklearn model is then used
        as the reference against which our H2O models are compared.
"""
print("*******************************************************************************************")
print("Test5: test the GLM with imputation of missing values with column averages.")
h2o.cluster_info()
# training result from python sklearn
(p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
self.sklearn_binomial_result(self.training_data_file_nans, self.test_data_file_nans, False, False)
# import training set and test set
training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_nans))
test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_nans))
# change the response columns to be categorical
training_data[self.y_index] = training_data[self.y_index].round().asfactor()
test_data[self.y_index] = test_data[self.y_index].round().asfactor()
# train H2O models with missing_values_handling="MeanImputation"
model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0,
missing_values_handling="MeanImputation")
model_h2o.train(x=self.x_indices, y=self.y_index, training_frame=training_data)
# calculate H2O model performance with test data set
h2o_model_test_metrics = model_h2o.model_performance(test_data=test_data)
num_test_failed = self.test_failed
# print out comparison results our H2O GLM and Sklearn model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o, h2o_model_test_metrics,
self.family, "\nTest5 Done!",
compare_att_str=[
"\nComparing intercept and weights"
" ....",
"\nComparing logloss from training"
" dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from"
" training dataset ....",
"\nComparing confusion matrices from"
" test dataset ...",
"\nComparing accuracy from training"
" dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O missing values intercept and"
" weights: \n",
"H2O missing values logloss from"
" training dataset: ",
"H2O missing values logloss from"
" test dataset",
"H2O missing values confusion matrix"
" from training dataset: \n",
"H2O missing values confusion matrix"
" from test dataset: \n",
"H2O missing values accuracy from"
" training dataset: ",
"H2O missing values accuracy from"
" test dataset: "],
template_att_str=[
"Sklearn missing values intercept"
" and weights: \n",
"Sklearn missing values logloss from"
" training dataset: ",
"Sklearn missing values logloss from"
" test dataset: ",
"Sklearn missing values confusion"
" matrix from training dataset: \n",
"Sklearn missing values confusion"
" matrix from test dataset: \n",
"Sklearn missing values accuracy"
" from training dataset: ",
"Sklearn missing values accuracy"
" from test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ"
" too much!",
"Logloss from test dataset differ"
" too much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close "
"enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[
True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
template_params=[
p_weights, p_logloss_train, p_cm_train,
p_accuracy_training, p_logloss_test,
p_cm_test, p_accuracy_test],
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if tests have failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test5_missing_values",
num_test_failed, self.test_failed)
self.test_num += 1
def test6_enum_missing_values(self):
"""
Test parameter missing_values_handling="MeanImputation" with
mixed predictors (categorical/real value columns). We first generate a data set that
        contains a random number of categorical and real value columns.  Next, we
        encode the categorical columns.  Then, we generate the random data set using the formula
        as before.  Next, we go into the predictor matrix and randomly
        change some values to nan to create missing values.  Again, we build a Sklearn
        logistic regression model and compare our H2O models with it.
"""
# no regularization in this case, use reference level plus one-hot-encoding
print("*******************************************************************************************")
print("Test6: test the GLM with enum/real values.")
h2o.cluster_info()
# training result from python sklearn
(p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
self.sklearn_binomial_result(self.training_data_file_enum_nans, self.test_data_file_enum_nans, True, False)
# import training set and test set with missing values
training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_enum_nans))
test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_enum_nans))
# change the categorical data using .asfactor()
for ind in range(self.enum_col):
training_data[ind] = training_data[ind].round().asfactor()
test_data[ind] = test_data[ind].round().asfactor()
num_col = training_data.ncol
y_index = num_col - 1
x_indices = list(range(y_index))
# change response variables to be categorical
training_data[y_index] = training_data[y_index].round().asfactor()
# check to make sure all response classes are represented, otherwise, quit
if training_data[y_index].nlevels()[0] < self.class_number:
print("Response classes are not represented in training dataset.")
sys.exit(0)
test_data[y_index] = test_data[y_index].round().asfactor()
# generate H2O model
model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0,
missing_values_handling="MeanImputation")
model_h2o.train(x=x_indices, y=y_index, training_frame=training_data)
h2o_model_test_metrics = model_h2o.model_performance(test_data=test_data)
num_test_failed = self.test_failed
# print out comparison results our H2O GLM with Sklearn model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o, h2o_model_test_metrics,
self.family, "\nTest6 Done!",
compare_att_str=[
"\nComparing intercept and "
"weights ....",
"\nComparing logloss from training"
" dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from"
" training dataset ....",
"\nComparing confusion matrices from"
" test dataset ...",
"\nComparing accuracy from training"
" dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O with enum/real values, "
"no regularization and missing values"
" intercept and weights: \n",
"H2O with enum/real values, no "
"regularization and missing values"
" logloss from training dataset: ",
"H2O with enum/real values, no"
" regularization and missing values"
" logloss from test dataset",
"H2O with enum/real values, no"
" regularization and missing values"
" confusion matrix from training"
" dataset: \n",
"H2O with enum/real values, no"
" regularization and missing values"
" confusion matrix from test"
" dataset: \n",
"H2O with enum/real values, no"
" regularization and missing values "
"accuracy from training dataset: ",
"H2O with enum/real values, no "
"regularization and missing values"
" accuracy from test dataset: "],
template_att_str=[
"Sklearn missing values intercept "
"and weights: \n",
"Sklearn with enum/real values, no"
" regularization and missing values"
" logloss from training dataset: ",
"Sklearn with enum/real values, no "
"regularization and missing values"
" logloss from test dataset: ",
"Sklearn with enum/real values, no "
"regularization and missing values "
"confusion matrix from training"
" dataset: \n",
"Sklearn with enum/real values, no "
"regularization and missing values "
"confusion matrix from test "
"dataset: \n",
"Sklearn with enum/real values, no "
"regularization and missing values "
"accuracy from training dataset: ",
"Sklearn with enum/real values, no "
"regularization and missing values "
"accuracy from test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ"
" too much!",
"Logloss from test dataset differ too"
" much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close"
" enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[
True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
template_params=[
p_weights, p_logloss_train, p_cm_train,
p_accuracy_training, p_logloss_test,
p_cm_test, p_accuracy_test],
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
h2o.cluster_info()
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test6_enum_missing_values",
num_test_failed, self.test_failed)
self.test_num += 1
def test7_missing_enum_values_lambda_search(self):
"""
        Test parameter missing_values_handling="MeanImputation" with
        mixed predictors (categorical/real value columns) and with lambda search enabled.
We use the same predictors with missing values from test6. Next, we encode the categorical columns using
true one hot encoding since Lambda-search will be enabled with alpha set to 0.5. Since the encoding
is different in this case from test6, we will build a brand new Sklearn logistic regression model and
compare the best H2O model logloss/prediction accuracy with it.
"""
# perform lambda_search, regularization and one hot encoding.
print("*******************************************************************************************")
print("Test7: test the GLM with imputation of missing enum/real values under lambda search.")
h2o.cluster_info()
# training result from python sklearn
(p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
self.sklearn_binomial_result(self.training_data_file_enum_nans,
self.test_data_file_enum_nans_true_one_hot, True, True,
validation_data_file=self.validation_data_file_enum_nans_true_one_hot)
# import training set and test set with missing values and true one hot encoding
training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_enum_nans_true_one_hot))
validation_data = h2o.import_file(pyunit_utils.locate(self.validation_data_file_enum_nans_true_one_hot))
test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_enum_nans_true_one_hot))
# change the categorical data using .asfactor()
for ind in range(self.enum_col):
training_data[ind] = training_data[ind].round().asfactor()
validation_data[ind] = validation_data[ind].round().asfactor()
test_data[ind] = test_data[ind].round().asfactor()
num_col = training_data.ncol
y_index = num_col - 1
x_indices = list(range(y_index))
# change response column to be categorical
training_data[y_index] = training_data[y_index].round().asfactor()
# check to make sure all response classes are represented, otherwise, quit
if training_data[y_index].nlevels()[0] < self.class_number:
print("Response classes are not represented in training dataset.")
sys.exit(0)
validation_data[y_index] = validation_data[y_index].round().asfactor()
test_data[y_index] = test_data[y_index].round().asfactor()
# train H2O model
model_h2o_0p5 = H2OGeneralizedLinearEstimator(family=self.family, lambda_search=True, alpha=0.5,
lambda_min_ratio=1e-20, missing_values_handling="MeanImputation")
model_h2o_0p5.train(x=x_indices, y=y_index, training_frame=training_data, validation_frame=validation_data)
h2o_model_0p5_test_metrics = model_h2o_0p5.model_performance(test_data=test_data)
num_test_failed = self.test_failed
# print out comparison results for our H2O GLM with Sklearn model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o_0p5, h2o_model_0p5_test_metrics,
self.family, "\nTest7 Done!",
compare_att_str=[
"\nComparing intercept and "
"weights ....",
"\nComparing logloss from training"
" dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from"
" training dataset ....",
"\nComparing confusion matrices from"
" test dataset ...",
"\nComparing accuracy from training"
" dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O with enum/real values, lamba "
"search and missing values intercept"
" and weights: \n",
"H2O with enum/real values, lamba "
"search and missing values logloss "
"from training dataset: ",
"H2O with enum/real values, lamba "
"search and missing values logloss "
"from test dataset",
"H2O with enum/real values, lamba "
"search and missing values confusion "
"matrix from training dataset: \n",
"H2O with enum/real values, lamba "
"search and missing values confusion "
"matrix from test dataset: \n",
"H2O with enum/real values, lamba "
"search and missing values accuracy "
"from training dataset: ",
"H2O with enum/real values, lamba "
"search and missing values accuracy "
"from test dataset: "],
template_att_str=[
"Sklearn with enum/real values, lamba"
" search and missing values intercept"
" and weights: \n",
"Sklearn with enum/real values, lamba"
" search and missing values logloss "
"from training dataset: ",
"Sklearn with enum/real values, lamba"
" search and missing values logloss "
"from test dataset: ",
"Sklearn with enum/real values, lamba"
" search and missing values confusion"
" matrix from training dataset: \n",
"Sklearn with enum/real values, lamba"
" search and missing values confusion"
" matrix from test dataset: \n",
"Sklearn with enum/real values, lamba"
" search and missing values accuracy"
" from training dataset: ",
"Sklearn with enum/real values, lamba"
" search and missing values accuracy"
" from test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ "
"too much!",
"Logloss from test dataset differ too"
" much!", "", "", "Accuracies from"
" training dataset differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close "
"enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[
True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
template_params=[
p_weights, p_logloss_train, p_cm_train,
p_accuracy_training, p_logloss_test,
p_cm_test, p_accuracy_test],
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += \
pyunit_utils.show_test_results("test7_missing_enum_values_lambda_search", num_test_failed, self.test_failed)
self.test_num += 1
def sklearn_binomial_result(self, training_data_file, test_data_file, has_categorical, true_one_hot,
validation_data_file=""):
"""
This function will generate a Sklearn logistic model using the same set of data sets we have used to build
our H2O models. The purpose here is to be able to compare the performance of H2O
models with the Sklearn model built here. This is useful in cases where theoretical solutions
do not exist. If the data contains missing values, mean imputation is applied to the data set before
a Sklearn model is built. In addition, if there are enum columns in predictors and also missing values,
the same encoding and missing value imputation method used by H2O is applied to the data set before we build
the Sklearn model.
:param training_data_file: string storing training data set filename with directory path.
:param test_data_file: string storing test data set filename with directory path.
        :param has_categorical: bool indicating if the data set contains mixed predictors (both enum and real)
:param true_one_hot: bool True: true one hot encoding is used. False: reference level plus one hot encoding
is used
:param validation_data_file: optional string, denoting validation file so that we can concatenate
            training and validation data sets into one big training set, since the H2O model uses a training
and a validation data set.
:return: a tuple containing the weights, logloss, confusion matrix, prediction accuracy calculated on training
data set and test data set respectively.
"""
# read in the training data into a matrix
training_data_xy = np.asmatrix(np.genfromtxt(training_data_file, delimiter=',', dtype=None))
test_data_xy = np.asmatrix(np.genfromtxt(test_data_file, delimiter=',', dtype=None))
if len(validation_data_file) > 0: # validation data set exist and add it to training_data
temp_data_xy = np.asmatrix(np.genfromtxt(validation_data_file, delimiter=',', dtype=None))
training_data_xy = np.concatenate((training_data_xy, temp_data_xy), axis=0)
# if predictor contains categorical data, perform encoding of enums to binary bits
# for missing categorical enums, a new level is created for the nans
if has_categorical:
training_data_xy = pyunit_utils.encode_enum_dataset(training_data_xy, self.enum_level_vec, self.enum_col,
true_one_hot, np.any(training_data_xy))
test_data_xy = pyunit_utils.encode_enum_dataset(test_data_xy, self.enum_level_vec, self.enum_col,
true_one_hot, np.any(training_data_xy))
# replace missing values for real value columns with column mean before proceeding for training/test data sets
if np.isnan(training_data_xy).any():
inds = np.where(np.isnan(training_data_xy))
col_means = np.asarray(np.nanmean(training_data_xy, axis=0))[0]
training_data_xy[inds] = np.take(col_means, inds[1])
if np.isnan(test_data_xy).any():
# replace the actual means with column means from training
inds = np.where(np.isnan(test_data_xy))
test_data_xy = pyunit_utils.replace_nan_with_mean(test_data_xy, inds, col_means)
# now data is ready to be massaged into format that sklearn can use
(response_y, x_mat) = pyunit_utils.prepare_data_sklearn_multinomial(training_data_xy)
(t_response_y, t_x_mat) = pyunit_utils.prepare_data_sklearn_multinomial(test_data_xy)
# train the sklearn Model
sklearn_model = LogisticRegression(class_weight=self.sklearn_class_weight)
sklearn_model = sklearn_model.fit(x_mat, response_y)
# grab the performance metrics on training data set
accuracy_training = sklearn_model.score(x_mat, response_y)
weights = sklearn_model.coef_
p_response_y = sklearn_model.predict(x_mat)
log_prob = sklearn_model.predict_log_proba(x_mat)
logloss_training = self.logloss_sklearn(response_y, log_prob)
cm_train = metrics.confusion_matrix(response_y, p_response_y)
# grab the performance metrics on the test data set
p_response_y = sklearn_model.predict(t_x_mat)
log_prob = sklearn_model.predict_log_proba(t_x_mat)
logloss_test = self.logloss_sklearn(t_response_y, log_prob)
cm_test = metrics.confusion_matrix(t_response_y, p_response_y)
accuracy_test = metrics.accuracy_score(t_response_y, p_response_y)
return weights, logloss_training, cm_train, accuracy_training, logloss_test, cm_test, accuracy_test
def logloss_sklearn(self, true_y, log_prob):
"""
        This function calculates the average logloss for the Sklearn model given the true response (true_y) and
        the log probabilities (log_prob).
:param true_y: array denoting the true class label
:param log_prob: matrix containing the log of Prob(Y=0) and Prob(Y=1)
:return: average logloss.
"""
(num_row, num_class) = log_prob.shape
logloss = 0.0
for ind in range(num_row):
logloss += log_prob[ind, int(true_y[ind])]
return -1.0 * logloss / num_row
def test_glm_binomial():
"""
Create and instantiate TestGLMBinomial class and perform tests specified for GLM
Binomial family.
:return: None
"""
test_glm_binomial = TestGLMBinomial()
test_glm_binomial.test1_glm_no_regularization()
test_glm_binomial.test2_glm_lambda_search()
test_glm_binomial.test3_glm_grid_search()
test_glm_binomial.test4_glm_remove_collinear_columns()
test_glm_binomial.test5_missing_values()
test_glm_binomial.test6_enum_missing_values()
test_glm_binomial.test7_missing_enum_values_lambda_search()
test_glm_binomial.teardown()
sys.stdout.flush()
if test_glm_binomial.test_failed: # exit with error if any tests have failed
sys.exit(1)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_glm_binomial)
else:
test_glm_binomial()
| apache-2.0 |
gviejo/ThalamusPhysio | python/main_test_final_classification_XGB.py | 1 | 13731 | import ternary
import numpy as np
import pandas as pd
from functions import *
import sys
from functools import reduce
from sklearn.manifold import *
from sklearn.cluster import *
from sklearn.linear_model import *
from sklearn.ensemble import *
from pylab import *
import _pickle as cPickle
from skimage.filters import gaussian
from sklearn.model_selection import cross_val_score
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
import xgboost as xgb
def extract_tree_threshold(trees):
""" Take BST TREE and return a dict = {features index : [splits position 1, splits position 2, ...]}
"""
n = len(trees.get_dump())
thr = {}
for t in range(n):
gv = xgb.to_graphviz(trees, num_trees=t)
body = gv.body
for i in range(len(body)):
for l in body[i].split('"'):
if 'f' in l and '<' in l:
tmp = l.split("<")
if tmp[0] in thr:
thr[tmp[0]].append(float(tmp[1]))
else:
thr[tmp[0]] = [float(tmp[1])]
for k in thr:
thr[k] = np.sort(np.array(thr[k]))
return thr
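# Minimal usage sketch (illustrative only; `bst` stands for any trained xgboost Booster):
#   splits = extract_tree_threshold(bst)
#   for feat, cuts in splits.items():
#       print(feat, len(cuts), cuts[:3])
# Keys are feature names of the form 'fN' as they appear in the graphviz dump of each tree.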
def xgb_decodage(Xr, Yr, Xt, n_class):
dtrain = xgb.DMatrix(Xr, label=Yr)
dtest = xgb.DMatrix(Xt)
params = {'objective': "multi:softprob",
'eval_metric': "mlogloss", #loglikelihood loss
'seed': np.random.randint(1, 10000), #for reproducibility
'silent': 1,
'learning_rate': 0.01,
'min_child_weight': 2,
'n_estimators': 100,
# 'subsample': 0.5,
'max_depth': 5,
'gamma': 0.5,
'num_class':n_class}
num_round = 1000
bst = xgb.train(params, dtrain, num_round)
ymat = bst.predict(dtest)
pclas = np.argmax(ymat, 1)
return pclas
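# Note on xgb_decodage: with objective "multi:softprob", Booster.predict returns an
# (n_samples, n_class) probability matrix, and np.argmax(..., 1) turns it into hard class labels.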
def fit_cv(X, Y, n_cv=10, verbose=1, shuffle = False):
if np.ndim(X)==1:
X = np.transpose(np.atleast_2d(X))
cv_kf = KFold(n_splits=n_cv, shuffle=True, random_state=42)
skf = cv_kf.split(X)
Y_hat=np.zeros(len(Y))*np.nan
n_class = len(np.unique(Y))
for idx_r, idx_t in skf:
Xr = np.copy(X[idx_r, :])
Yr = np.copy(Y[idx_r])
Xt = np.copy(X[idx_t, :])
Yt = np.copy(Y[idx_t])
if shuffle: np.random.shuffle(Yr)
Yt_hat = xgb_decodage(Xr, Yr, Xt, n_class)
Y_hat[idx_t] = Yt_hat
return Y_hat
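# Usage sketch (shapes illustrative): with X of shape (n_neurons, n_features) and integer
# labels in {0, ..., n_class-1},
#   Y_hat = fit_cv(X, labels, n_cv=10)
# returns one out-of-fold prediction per neuron; shuffle=True permutes the training labels
# in each fold and is used below as the chance-level control.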
############################################################################################################
# LOADING DATA
############################################################################################################
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
burstiness = pd.HDFStore("/mnt/DataGuillaume/MergedData/BURSTINESS.h5")['w']
lambdaa = pd.read_hdf("/mnt/DataGuillaume/MergedData/LAMBDA_AUTOCORR.h5")[('rem', 'b')]
lambdaa = lambdaa[np.logical_and(lambdaa>0.0,lambdaa<30.0)]
theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True)
theta = pd.DataFrame( index = theta_ses['rem'],
columns = ['phase', 'pvalue', 'kappa'],
data = theta_mod['rem'])
# rippower = pd.read_hdf("../figures/figures_articles/figure2/power_ripples_2.h5")
mappings = pd.read_hdf("/mnt/DataGuillaume/MergedData/MAPPING_NUCLEUS.h5")
swr_phase = pd.read_hdf("/mnt/DataGuillaume/MergedData/SWR_PHASE.h5")
# SWR MODULATION
swr_mod, swr_ses = loadSWRMod('/mnt/DataGuillaume/MergedData/SWR_THAL_corr.pickle', datasets, return_index=True)
nbins = 400
binsize = 5
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
swr = pd.DataFrame( columns = swr_ses,
index = times,
data = gaussFilt(swr_mod, (5,)).transpose())
swr = swr.loc[-500:500]
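# swr now holds one smoothed SWR modulation curve per neuron (columns), indexed by time
# around the ripple in 5-unit bins (presumably ms) and restricted here to +/- 500.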
# AUTOCORR FAST
store_autocorr = pd.HDFStore("/mnt/DataGuillaume/MergedData/AUTOCORR_ALL.h5")
autocorr_wak = store_autocorr['wake'].loc[0.5:]
autocorr_rem = store_autocorr['rem'].loc[0.5:]
autocorr_sws = store_autocorr['sws'].loc[0.5:]
autocorr_wak = autocorr_wak.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_rem = autocorr_rem.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_sws = autocorr_sws.rolling(window = 20, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 3.0)
autocorr_wak = autocorr_wak[2:150]
autocorr_rem = autocorr_rem[2:150]
autocorr_sws = autocorr_sws[2:150]
# HISTOGRAM THETA
theta_hist = pd.read_hdf("/mnt/DataGuillaume/MergedData/THETA_THAL_HISTOGRAM_2.h5")
theta_hist = theta_hist.rolling(window = 5, win_type='gaussian', center = True, min_periods=1).mean(std=1.0)
theta_wak = theta_hist.xs(('wak'), 1, 1)
theta_rem = theta_hist.xs(('rem'), 1, 1)
# AUTOCORR LONG
store_autocorr2 = pd.HDFStore("/mnt/DataGuillaume/MergedData/AUTOCORR_LONG.h5")
autocorr2_wak = store_autocorr2['wak'].loc[0.5:]
autocorr2_rem = store_autocorr2['rem'].loc[0.5:]
autocorr2_sws = store_autocorr2['sws'].loc[0.5:]
autocorr2_wak = autocorr2_wak.rolling(window = 100, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 10.0)
autocorr2_rem = autocorr2_rem.rolling(window = 100, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 10.0)
autocorr2_sws = autocorr2_sws.rolling(window = 100, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 10.0)
autocorr2_wak = autocorr2_wak[2:2000]
autocorr2_rem = autocorr2_rem[2:2000]
autocorr2_sws = autocorr2_sws[2:2000]
############################################################################################################
# WHICH NEURONS
############################################################################################################
firing_rate = pd.read_hdf("/mnt/DataGuillaume/MergedData/FIRING_RATE_ALL.h5")
fr_index = firing_rate.index.values[((firing_rate >= 1.0).sum(1) == 3).values]
# neurons = reduce(np.intersect1d, (burstiness.index.values, theta.index.values, rippower.index.values, fr_index))
# neurons = reduce(np.intersect1d, (fr_index, autocorr_sws.columns, autocorr2_rem.columns, theta_rem.columns, swr.columns, lambdaa.index.values))
neurons = reduce(np.intersect1d, (fr_index, autocorr_sws.columns, autocorr_rem.columns, autocorr_wak.columns, swr.columns))
# neurons = np.array([n for n in neurons if 'Mouse17' in n])
# nucleus = ['AD', 'AM', 'AVd', 'AVv', 'VA', 'LDvl', 'CM']
# neurons = np.intersect1d(neurons, mappings.index[mappings['nucleus'].isin(nucleus)])
count_nucl = pd.DataFrame(columns = ['12', '17','20', '32'])
for m in ['12', '17','20', '32']:
subspace = pd.read_hdf("/mnt/DataGuillaume/MergedData/subspace_Mouse"+m+".hdf5")
nucleus = np.unique(subspace['nucleus'])
total = [np.sum(subspace['nucleus'] == n) for n in nucleus]
count_nucl[m] = pd.Series(index = nucleus, data = total)
nucleus = list(count_nucl.dropna().index.values)
allnucleus = list(np.unique(mappings.loc[neurons,'nucleus']))
tokeep = np.array([n for n in neurons if mappings.loc[n,'nucleus'] in nucleus])
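# nucleus keeps only the nuclei counted in all four mice (dropna over the per-mouse counts);
# tokeep restricts the neuron set to those whose mapped nucleus belongs to that common list.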
############################################################################################################
# STACKING DIMENSIONS
############################################################################################################
# pc_short_rem = PCA(n_components=10).fit_transform(autocorr_rem[neurons].values.T)
# pc_short_wak = PCA(n_components=10).fit_transform(autocorr_wak[neurons].values.T)
# pc_short_sws = PCA(n_components=10).fit_transform(autocorr_sws[neurons].values.T)
# pc_short_rem = np.log((pc_short_rem - pc_short_rem.min(axis = 0))+1)
# pc_short_wak = np.log((pc_short_wak - pc_short_wak.min(axis = 0))+1)
# pc_short_sws = np.log((pc_short_sws - pc_short_sws.min(axis = 0))+1)
# pc_long = PCA(n_components=1).fit_transform(autocorr2_rem[neurons].values.T)
# pc_long = np.log((pc_long - pc_long.min(axis=0))+1)
# # pc_long = np.log(lambdaa.loc[neurons].values[:,np.newaxis])
# # pc_theta = np.hstack([np.cos(theta.loc[neurons,'phase']).values[:,np.newaxis],np.sin(theta.loc[neurons,'phase']).values[:,np.newaxis],np.log(theta.loc[neurons,'kappa'].values[:,np.newaxis])])
# pc_theta = np.hstack([np.log(theta.loc[neurons,'kappa'].values[:,np.newaxis])])
# pc_swr = np.hstack([np.log(rippower.loc[neurons].values[:,np.newaxis])])
# pc_theta = PCA(n_components=3).fit_transform(theta_rem[neurons].values.T)
# pc_theta = np.log((pc_theta - pc_theta.min(axis = 0))+1)
# pc_swr = PCA(n_components=3).fit_transform(swr[neurons].values.T)
# pc_swr = np.log((pc_swr - pc_swr.min(axis = 0))+1)
# pc_theta -= pc_theta.min(axis = 0)
# pc_swr -= pc_swr.min(axis = 0)
# pc_theta = np.log(pc_theta+1)
# pc_swr = np.log(pc_swr+1)
# data = []
# for tmp in [autocorr_sws[neurons].values.T,autocorr2_rem[neurons].values.T,theta_rem[neurons].values.T,swr[neurons].values.T]:
# tmp = tmp - tmp.min()
# tmp = tmp / tmp.max()
# data.append(tmp)
# data = np.hstack([pc_short_rem, pc_short_sws, pc_long, pc_short_wak, pc_long, pc_theta, pc_swr])
# data = np.hstack([pc_short_rem, pc_short_sws, pc_short_wak])
# data = np.hstack([pc_theta, pc_swr])
# data = np.vstack([ autocorr_wak[neurons].values,autocorr_rem[neurons].values,autocorr_sws[neurons].values]).T
data = np.vstack([ autocorr_wak[tokeep].values,autocorr_rem[tokeep].values,autocorr_sws[tokeep].values,
autocorr2_wak[tokeep].values,autocorr2_rem[tokeep].values,autocorr2_sws[tokeep].values,
theta_hist.xs(('wak'),1,1)[tokeep].values,theta_hist.xs(('rem'),1,1)[tokeep].values,
swr[tokeep].values]).T
labels = np.array([nucleus.index(mappings.loc[n,'nucleus']) for n in tokeep])
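# data: one row per neuron, columns = the concatenated smoothed features (short and long
# autocorrelograms per state, theta phase histograms, SWR curve); labels = integer index of
# each neuron's nucleus, used as the multi-class target for XGBoost.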
##########################################################################################################
# XGB
##########################################################################################################
# alldata = [ np.vstack([autocorr_wak[tokeep].values,autocorr_rem[tokeep].values,autocorr_sws[tokeep].values]),
# np.vstack([autocorr2_wak[tokeep].values,autocorr2_rem[tokeep].values,autocorr2_sws[tokeep].values]),
# np.vstack([theta_hist.xs(('wak'),1,1)[tokeep].values,theta_hist.xs(('rem'),1,1)[tokeep].values]),
# swr[tokeep].values
# ]
alldata = [ np.vstack([autocorr_wak[tokeep].values,autocorr_rem[tokeep].values,autocorr_sws[tokeep].values]),
swr[tokeep].values
]
mean_score = pd.DataFrame(index = nucleus,columns=pd.MultiIndex.from_product([['score', 'shuffle'],['auto','swr'], ['mean', 'sem']]))
cols = np.unique(mean_score.columns.get_level_values(1))
n_repeat = 1000
for i, m in enumerate(cols):
data = alldata[i].T
test_score = pd.DataFrame(index = np.arange(n_repeat), columns = pd.MultiIndex.from_product([['test','shuffle'], nucleus]))
print(i,m)
for j in range(n_repeat):
test = fit_cv(data, labels, 10, verbose = 0)
rand = fit_cv(data, labels, 10, verbose = 0, shuffle = True)
print(i,j)
for k, n in enumerate(nucleus):
idx = labels == nucleus.index(n)
test_score.loc[j,('test',n)] = np.sum(test[idx] == nucleus.index(n))/np.sum(labels == nucleus.index(n))
test_score.loc[j,('shuffle',n)] = np.sum(rand[idx] == nucleus.index(n))/np.sum(labels == nucleus.index(n))
mean_score[('score',m,'mean')] = test_score['test'].mean(0)
mean_score[('score',m,'sem')] = test_score['test'].sem(0)
mean_score[('shuffle',m,'mean')] = test_score['shuffle'].mean(0)
mean_score[('shuffle',m,'sem')] = test_score['shuffle'].sem(0)
mean_score = mean_score.sort_values(('score','auto', 'mean'))
mean_score.to_hdf(data_directory+'SCORE_XGB.h5', 'mean_score')
##########################################################################################################
# KL DIVERGENCE
##########################################################################################################
###########################################################################################################
# LOOKING AT SPLITS
###########################################################################################################
# data = np.vstack(alldata).T
# dtrain = xgb.DMatrix(data, label=labels)
# params = {'objective': "multi:softprob",
# 'eval_metric': "mlogloss", #loglikelihood loss
# 'seed': 2925, #for reproducibility
# 'silent': 1,
# 'learning_rate': 0.05,
# 'min_child_weight': 2,
# 'n_estimators': 100,
# # 'subsample': 0.5,
# 'max_depth': 5,
# 'gamma': 0.5,
# 'num_class':len(nucleus)}
# num_round = 100
# bst = xgb.train(params, dtrain, num_round)
# splits = extract_tree_threshold(bst)
# features_id = np.hstack([np.ones(alldata[i].shape[0])*i for i in range(4)])
# features = np.zeros(data.shape[1])
# for k in splits: features[int(k[1:])] = len(splits[k])
figure()
ct = 0
for i, c in enumerate(cols):
bar(np.arange(len(nucleus))+ct, mean_score[('score',c, 'mean')].values.flatten(), 0.2)
bar(np.arange(len(nucleus))+ct, mean_score[('shuffle',c, 'mean')].values.flatten(), 0.2, alpha = 0.5)
xticks(np.arange(len(nucleus)), mean_score.index.values)
ct += 0.2
show()
# tmp = mean_score['score'] - mean_score['shuffle']
# tmp = tmp.sort_values('auto')
# figure()
# ct = 0
# for i, c in enumerate(cols):
# bar(np.arange(len(nucleus))+ct, tmp[c].values.flatten(), 0.2)
# xticks(np.arange(len(nucleus)), mean_score.index.values)
# ct += 0.2
# show()
# # mean_score = pd.read_hdf("../figures/figures_articles/figure6/mean_score.h5")
# # mean_score.to_hdf("../figures/figures_articles/figure6/mean_score.h5", 'xgb')
# figure()
# ct = 0
# for i, c in enumerate(cols):
# bar(np.arange(len(nucleus))+ct, mean_score[('score',c )].values.flatten(), 0.2)
# bar(np.arange(len(nucleus))+ct, mean_score[('shuffle',c)].values.flatten(), 0.2, alpha = 0.5)
# xticks(np.arange(len(nucleus)), mean_score.index.values)
# ct += 0.2
# show()
| gpl-3.0 |
NeuroDataDesign/seelviz | Jupyter/cherrypy/albert.py | 1 | 19679 | import os
import os.path
import cherrypy
from cherrypy.lib import static
from cherrypy.lib.static import serve_file
import shutil
import tempfile
import glob
from clarityviz import claritybase, densitygraph, atlasregiongraph
# from clarityviz import densitygraph as dg
# from clarityviz import atlasregiongraph as arg
import networkx as nx
import plotly
import matplotlib
import matplotlib.pyplot as plt
from ndreg import *
import ndio.remote.neurodata as neurodata
import nibabel as nb
from numpy import genfromtxt
localDir = os.path.dirname(__file__)
absDir = os.path.join(os.getcwd(), localDir)
# print absDir
def imgGet(inToken, alignment):
refToken = "ara_ccf2" # hardcoded 'ara_ccf2' atlas until additional functionality is requested
refImg = imgDownload(refToken) # download atlas
refAnnoImg = imgDownload(refToken, channel="annotation")
print "reference token/atlas obtained"
inImg = imgDownload(inToken, resolution=5) # store downsampled level 5 brain to memory
(values, bins) = np.histogram(sitk.GetArrayFromImage(inImg), bins=100, range=(0,500))
print "level 5 brain obtained"
counts = np.bincount(values)
maximum = np.argmax(counts)
lowerThreshold = maximum
upperThreshold = sitk.GetArrayFromImage(inImg).max()+1
inImg = sitk.Threshold(inImg,lowerThreshold,upperThreshold,lowerThreshold) - lowerThreshold
print "applied filtering"
spacingImg = inImg.GetSpacing()
spacing = tuple(i * 50 for i in spacingImg)
inImg.SetSpacing(spacingImg)
inImg_download = inImg # Aut1367 set to default spacing
inImg = imgResample(inImg, spacing=refImg.GetSpacing())
print "resampled img"
Img_reorient = imgReorient(inImg, alignment, "RSA") # reoriented Aut1367
refImg_ds = imgResample(refImg, spacing=spacing) # atlas with downsampled spacing 10x
inImg_ds = imgResample(Img_reorient, spacing=spacing) # Aut1367 with downsampled spacing 10x
print "reoriented image"
affine = imgAffineComposite(inImg_ds, refImg_ds, iterations=100, useMI=True, verbose=True)
inImg_affine = imgApplyAffine(Img_reorient, affine, size=refImg.GetSize())
print "affine"
inImg_ds = imgResample(inImg_affine, spacing=spacing)
(field, invField) = imgMetamorphosisComposite(inImg_ds, refImg_ds, alphaList=[0.05, 0.02, 0.01], useMI=True, iterations=100, verbose=True)
inImg_lddmm = imgApplyField(inImg_affine, field, size=refImg.GetSize())
print "downsampled image"
invAffine = affineInverse(affine)
invAffineField = affineToField(invAffine, refImg.GetSize(), refImg.GetSpacing())
invField = fieldApplyField(invAffineField, invField)
inAnnoImg = imgApplyField(refAnnoImg, invField,useNearest=True, size=Img_reorient.GetSize())
inAnnoImg = imgReorient(inAnnoImg, "RSA", alignment)
inAnnoImg = imgResample(inAnnoImg, spacing=inImg_download.GetSpacing(), size=inImg_download.GetSize(), useNearest=True)
print "inverse affine"
imgName = inToken + "reorient_atlas"
location = "img/" + imgName + ".nii"
imgWrite(inAnnoImg, str(location))
# ndImg = sitk.GetArrayFromImage(inAnnoImg)
# sitk.WriteImage(inAnnoImg, location)
print "generated output"
print imgName
return imgName
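# imgGet, in outline: download the ara_ccf2 atlas and its annotation channel, download the
# input brain at resolution 5, threshold away an estimated background intensity level,
# reorient the brain to RSA, align it to the atlas with an affine step followed by LDDMM
# (imgMetamorphosisComposite), then push the atlas annotations back through the inverse
# field into the original orientation/spacing and save them as "<token>reorient_atlas.nii".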
def image_parse(inToken):
token, alignment = inToken.split(" ")
imgName = imgGet(token, alignment)
# imgName = str(inToken) + "reorient_atlas"
copydir = os.path.join(os.getcwd(), os.path.dirname('img/'))
img = claritybase.claritybase(imgName, copydir) # initial call for clarityviz
print "loaded into claritybase"
img.loadEqImg()
print "loaded image"
img.applyLocalEq()
print "local histogram equalization"
img.loadGeneratedNii()
print "loaded generated nii"
img.calculatePoints(threshold = 0.9, sample = 0.05)
print "calculating points"
img.brightPoints()
print "saving brightest points to csv"
img.generate_plotly_html()
print "generating plotly"
img.plot3d()
print "generating nodes and edges list"
img.graphmlconvert()
print "generating graphml"
img.get_brain_figure(None, imgName + ' edgecount')
return imgName
def density_graph(Token):
    densg = densitygraph.densitygraph(Token)
print 'densitygraph module'
densg.generate_density_graph()
print 'generated density graph'
g = nx.read_graphml(Token + '/' + Token + '.graphml')
ggraph = densg.get_brain_figure(g = g, plot_title=Token)
plotly.offline.plot(ggraph, filename = Token + '/' + Token + '_brain_figure.html')
hm = densg.generate_heat_map()
plotly.offline.plot(hm, filename = Token + '/' + Token + '_brain_heatmap.html')
def atlas_region(Token):
atlas_img = Token + '/' + Token + 'localeq' + '.nii'
atlas = nb.load(atlas_img) # <- atlas .nii image
atlas_data = atlas.get_data()
csvfile = Token + '/' + Token + '.csv' # 'atlasexp/Control258localeq.csv' # <- regular csv from the .nii to csv step
bright_points = genfromtxt(csvfile, delimiter=',')
locations = bright_points[:, 0:3]
regions = [atlas_data[l[0], l[1], l[2]] for l in locations]
outfile = open(Token + '/' + Token + '.region.csv', 'w')
infile = open(csvfile, 'r')
for i, line in enumerate(infile):
line = line.strip().split(',')
outfile.write(",".join(line) + "," + str(regions[i]) + "\n") # adding a 5th column to the original csv indicating its region (integer)
infile.close()
outfile.close()
print len(regions)
print regions[0:10]
uniq = list(set(regions))
numRegions = len(uniq)
print len(uniq)
print uniq
newToken = Token + '.region'
    atlas = atlasregiongraph.atlasregiongraph(newToken, Token)
atlas.generate_atlas_region_graph(None, numRegions)
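# atlas_region, in outline: look up the integer atlas label at each bright-point voxel,
# append it as an additional column in "<Token>.region.csv", and hand the region-annotated
# CSV to atlasregiongraph to build the per-region colored graph.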
class FileDemo(object):
@cherrypy.expose
def index(self, directory="."):
img = []
for filename in glob.glob('img/*'):
img.append(filename)
html = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="">
<meta name="author" content="">
<title>ClarityViz</title>
<!-- Bootstrap Core CSS -->
<link href="static/vendor/bootstrap/css/bootstrap.min.css" rel="stylesheet">
<!-- Custom Fonts -->
<link href="static/vendor/font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
<link href='https://fonts.googleapis.com/css?family=Open+Sans:300italic,400italic,600italic,700italic,800italic,400,300,600,700,800' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Merriweather:400,300,300italic,400italic,700,700italic,900,900italic' rel='stylesheet' type='text/css'>
<!-- Plugin CSS -->
<link href="static/vendor/magnific-popup/magnific-popup.css" rel="stylesheet">
<!-- Theme CSS -->
<link href="static/css/creative.min.css" rel="stylesheet">
<!-- Custom styles for this template -->
<link href="static/css/style.css" rel="stylesheet">
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"></script>
<script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
<body id="page-top">
<nav id="mainNav" class="navbar navbar-default navbar-fixed-top">
<div class="container-fluid">
<!-- Brand and toggle get grouped for better mobile display -->
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1">
<span class="sr-only">Toggle navigation</span> Menu <i class="fa fa-bars"></i>
</button>
<a class="navbar-brand page-scroll" href="#page-top">ClarityViz</a>
</div>
<!-- Collect the nav links, forms, and other content for toggling -->
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
<ul class="nav navbar-nav navbar-right">
<li>
<a class="page-scroll" href="https://neurodatadesign.github.io/seelviz/#Project">Project Description</a>
</li>
<li>
<a class="page-scroll" href="https://neurodatadesign.github.io/seelviz/#Graph">Graph Explanations</a>
</li>
<li>
<a class="page-scroll" href="https://neurodatadesign.github.io/seelviz/#About">About Us</a>
</li>
<li>
<a class="page-scroll" href="https://neurodatadesign.github.io/seelviz/#Acknowledgments">Acknowledgements</a>
</li>
</ul>
</div>
<!-- /.navbar-collapse -->
</div>
<!-- /.container-fluid -->
</nav>
<header>
<div class="header-content">
<div class="header-content-inner">
<h1 id="homeHeading">Select File</h1>
<hr>
<!-- Columns start at 50% wide on mobile and bump up to 33.3% wide on desktop -->
<div class="row">
<div class="col-xs-6 col-md-4"></div>
<div class="col-xs-6 col-md-4">
<form action="upload" method="post" enctype="multipart/form-data">
<div class="form-group">
<label for="myFile">Upload a File</label>
<div class="center-block"></div>
<input type="file" class="form-control" id="myFile" name="myFile">
<!-- <p class="help-block">Example block-level help text here.</p> -->
</div>
<!-- filename: <input type="file" name="myFile" /><br /> -->
<input class="btn btn-default" type="submit" value="Submit">
</form>
<h2>OR</h2>
<form action="neurodata" method="post" enctype="multipart/form-data">
<div class="form-group">
<label for="myToken">Submit Token and brain orientation</label>
<input type="text" class="form-control" id="myToken" name="myToken" placeholder="Token">
</div>
<!-- Token name: <input type="text" name="myToken"/><br /> -->
<input class="btn btn-default" type="submit" value="Submit">
</form>
</div>
<div class="col-xs-6 col-md-4"></div>
</div>
</div>
</div>
</header>
<section class="bg-primary" id="about">
<div class="container">
<div class="row">
<div class="col-lg-8 col-lg-offset-2 text-center">
<h2 class="section-heading">We've got what you need!</h2>
<hr class="light">
<p class="text-faded">Start Bootstrap has everything you need to get your new website up and running in no time! All of the templates and themes on Start Bootstrap are open source, free to download, and easy to use. No strings attached!</p>
<a href="#services" class="page-scroll btn btn-default btn-xl sr-button">Get Started!</a>
</div>
</div>
</div>
</section>
<section id="contact">
<div class="container">
<div class="row">
<div class="col-lg-8 col-lg-offset-2 text-center">
<h2 class="section-heading">Acknowledgements</h2>
<hr class="primary">
<p>Ready to start your next project with us? That's great! Give us a call or send us an email and we will get back to you as soon as possible!</p>
</div>
<div class="col-lg-4 col-lg-offset-2 text-center">
<i class="fa fa-phone fa-3x sr-contact"></i>
<p>123-456-6789</p>
</div>
<div class="col-lg-4 text-center">
<i class="fa fa-envelope-o fa-3x sr-contact"></i>
<p><a href="mailto:[email protected]">[email protected]</a></p>
</div>
</div>
</div>
</section>
<!-- jQuery -->
<script src="vendor/jquery/jquery.min.js"></script>
<!-- Bootstrap Core JavaScript -->
<script src="vendor/bootstrap/js/bootstrap.min.js"></script>
<!-- Plugin JavaScript -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery-easing/1.3/jquery.easing.min.js"></script>
<script src="vendor/scrollreveal/scrollreveal.min.js"></script>
<script src="vendor/magnific-popup/jquery.magnific-popup.min.js"></script>
<!-- Theme JavaScript -->
<script src="js/creative.min.js"></script>
</body>
</html>
"""
return html
@cherrypy.expose
def neurodata(self, myToken):
myToken = image_parse(myToken)
density_graph(myToken)
atlas_region(myToken)
fzip = shutil.make_archive(myToken, 'zip', myToken)
fzip_abs = os.path.abspath(fzip)
html = """
<html><body>
        <h2>Outputs</h2>
"""
plotly = []
for filename in glob.glob(myToken + '/*'):
absPath = os.path.abspath(filename)
if os.path.isdir(absPath):
link = '<a href="/index?directory=' + absPath + '">' + os.path.basename(filename) + "</a> <br />"
html += link
else:
if filename.endswith('html'):
plotly.append(filename)
link = '<a href="/download/?filepath=' + absPath + '">' + os.path.basename(filename) + "</a> <br />"
html += link
for plot in plotly:
absPath = os.path.abspath(plot)
html += """
<form action="plotly" method="get">
<input type="text" value=""" + '"' + absPath + '" name="plot" ' + """/>
<button type="submit">View """ + os.path.basename(plot) + """</button>
</form>"""
# html += '<a href="file:///' + '//' + absPath + '">' + "View Plotly graph</a> <br />"
html += '<a href="/download/?filepath=' + fzip_abs + '">' + myToken + '.zip' + "</a> <br />"
html += """</body></html>"""
return html
@cherrypy.expose
def upload(self, myFile):
copy = 'local/' + myFile.filename
print copy
        token = '.'.join(myFile.filename.split('.')[:-1])
with open(copy, 'wb') as fcopy:
while True:
data = myFile.file.read(8192)
if not data:
break
fcopy.write(data)
copydir = os.path.join(os.getcwd(), os.path.dirname('local/'))
print copydir
csv = claritybase.claritybase(token, copydir)
csv.loadInitCsv(copydir + '/' + myFile.filename)
csv.plot3d()
csv.savePoints()
csv.generate_plotly_html()
csv.graphmlconvert()
fzip = shutil.make_archive(token, 'zip', token)
fzip_abs = os.path.abspath(fzip)
html = """
<html><body>
        <h2>Outputs</h2>
"""
plotly = []
for filename in glob.glob(token + '/*'):
absPath = os.path.abspath(filename)
if os.path.isdir(absPath):
link = '<a href="/index?directory=' + absPath + '">' + os.path.basename(filename) + "</a> <br />"
html += link
else:
if filename.endswith('html'):
plotly.append(filename)
link = '<a href="/download/?filepath=' + absPath + '">' + os.path.basename(filename) + "</a> <br />"
html += link
for plot in plotly:
absPath = os.path.abspath(plot)
html += """
<form action="plotly" method="get">
<input type="text" value=""" + '"' + absPath + '" name="plot" ' + """/>
<button type="submit">View """ + os.path.basename(plot) + """</button>
</form>"""
# html += '<a href="file:///' + '//' + absPath + '">' + "View Plotly graph</a> <br />"
html += '<a href="/download/?filepath=' + fzip_abs + '">' + token + '.zip' + "</a> <br />"
html += """</body></html>"""
return html
@cherrypy.expose
def plotly(self, plot="test/testplotly.html"):
return file(plot)
class Download:
@cherrypy.expose
def index(self, filepath):
return serve_file(filepath, "application/x-download", "attachment")
tutconf = os.path.join(os.path.dirname('/usr/local/lib/python2.7/dist-packages/cherrypy/tutorial/'), 'tutorial.conf')
# print tutconf
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to index().
current_dir = os.path.dirname(os.path.abspath(__file__)) + os.path.sep
config = {
'global': {
'environment': 'production',
'log.screen': True,
'server.socket_host': '0.0.0.0',
'server.socket_port': 80,
'server.thread_pool': 10,
'engine.autoreload_on': True,
'engine.timeout_monitor.on': False,
'log.error_file': os.path.join(current_dir, 'errors.log'),
'log.access_file': os.path.join(current_dir, 'access.log'),
},
'/':{
'tools.staticdir.root' : current_dir,
},
'/static':{
'tools.staticdir.on' : True,
'tools.staticdir.dir' : 'static',
'staticFilter.on': True,
'staticFilter.dir': '/home/Tony/static'
},
}
root = FileDemo()
root.download = Download()
cherrypy.tree.mount(root)
cherrypy.quickstart(root, '/', config=config)
# cherrypy.quickstart(root, config=tutconf)
| apache-2.0 |
endrit-b/titanic-data-analysis | data_clustering.py | 1 | 2682 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
style.use("ggplot")
X = np.array([[1, 2],
[5, 8],
[1.5, 1.8],
[8, 8],
[1, 0.6],
[9, 11],
[1, 3],
[8, 9],
[0, 3],
[5, 4],
[6, 4]
])
colors = 10 * ['g', 'r', 'c', 'b', 'k']
class KMeansClustering:
def __init__(self, k=2, tol=0.001, max_iter=300):
self.k = k
self.tol = tol
self.max_iter = max_iter
def fit(self, data):
self.centroids = {}
for i in range(self.k):
self.centroids[i] = data[i]
for i in range(self.max_iter):
self.classifications = {}
for j in range(self.k):
self.classifications[j] = []
for featureset in data:
distances = [np.linalg.norm(featureset - self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
self.classifications[classification].append(featureset)
            prev_centroids = dict(self.centroids)
            for classification in self.classifications:
                # update each centroid to the mean of the points currently assigned to it
                self.centroids[classification] = np.average(self.classifications[classification], axis=0)
            optimized = True
            for c in self.centroids:
                original_centroid = prev_centroids[c]
current_centroid = self.centroids[c]
if np.sum((current_centroid - original_centroid)/original_centroid * 100.0) > self.tol:
optimized = False
if optimized:
break
def predict(self, data):
distances = [np.linalg.norm(data - self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
return classification
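# KMeansClustering.fit follows the standard Lloyd iteration: assign every point to its
# nearest centroid (Euclidean distance), recompute each centroid as the mean of its assigned
# points, and stop early once the summed percentage change of every centroid falls below
# `tol` or max_iter is reached.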
clf = KMeansClustering()
clf.fit(X)
for centroid in clf.centroids:
plt.scatter(clf.centroids[centroid][0], clf.centroids[centroid][1], marker='o', color='k', s=150, linewidths=5)
for classification in clf.classifications:
color = colors[classification]
for featureset in clf.classifications[classification]:
plt.scatter(featureset[0], featureset[1], marker='x', color=color, s=150, linewidths=5)
random_vals = np.array([[1, 3],
[8, 9],
[0, 3],
[5, 4],
[6, 4], [9, 8]])
for val in random_vals:
classification = clf.predict(val)
plt.scatter(val[0], val[1], marker="*", color=colors[classification], s=150, linewidth=5)
plt.show()
| gpl-3.0 |
gwicki/FeedReader | statistics/views.py | 1 | 1927 | from django.shortcuts import render
from django.http import JsonResponse
from django.utils.safestring import SafeString
from .funcs import *
import pandas as pd
def test_bar_plot(request):
context = plot_kind['bar']( get_stats( 'f', 'd' ) )
return render(request, 'stat-test.html', context)
def test_line_plot(request):
context = plot_kind['line']( get_stats( 'n', 'd' ) )
return render(request, 'stat-test.html', context)
def list_data( stats, plot_type ):
if type(stats) == pd.Series:
data = [{
'x': [x.strftime( '%y-%m-%d %H:%M:%S' ) for x in stats.index ],
'y': [ v for v in stats.values ],
'type': plot_type
}]
else:
data = list()
# data = [{
# 'x': str(range(10)),
# 'y': [ x**2 for x in range(10) ],
# 'name': 'hello',
# 'type': plot_type,
# },{
# 'x': str(range(10)),
# 'y': [ x**0.5 for x in range(10) ],
# 'name': 'world',
# 'type': plot_type,
# }]
for y in stats.columns:
data.append( {
'x': [ x.strftime( '%y-%m-%d %H:%M:%S' ) for x in stats.index ],
'y': [ v for v in stats[y].values ],
'name': str(y),
'type': plot_type,
} )
return SafeString( data )
def bargraph( stats ):
return { 'data': list_data( stats, 'bar' ), 'layout': SafeString({ 'barmode': 'stack' }) }
def linegraph( stats ):
return { 'data': list_data( stats, 'scatter' ), 'layout': SafeString({}) }
plot_kind = {
'bar': bargraph,
'line': linegraph,
}
def plot_stat(request, channel='f', freq='d', kind='bar', **kwargs):
print channel, freq, kind
print kwargs
context = plot_kind[kind]( get_stats( channel, freq, **kwargs ) )
return render(request, 'stat-index.html', context)
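# --- Illustrative sketch (added for clarity; not part of the original views) ---
# list_data() above turns a time-indexed pandas object into plotly-style trace
# dicts (wrapped in SafeString for template embedding). For a two-point Series
# the wrapped structure looks like:
#   [{'x': ['17-01-01 00:00:00', '17-01-02 00:00:00'], 'y': [1.0, 2.0], 'type': 'bar'}]
# The helper name and sample values are hypothetical.
def _example_trace():
    s = pd.Series([1.0, 2.0], index=pd.to_datetime(['2017-01-01', '2017-01-02']))
    return list_data(s, 'bar')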
| gpl-2.0 |
huzq/scikit-learn | sklearn/utils/tests/test_random.py | 10 | 7360 | import numpy as np
import pytest
import scipy.sparse as sp
from scipy.special import comb
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import _random_choice_csc, sample_without_replacement
from sklearn.utils._random import _our_rand_r_py
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
with pytest.raises(ValueError):
sample_without_replacement(5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_sample
with pytest.raises(ValueError):
sample_without_replacement(0, 1)
with pytest.raises(ValueError):
sample_without_replacement(1, 2)
# n_population == n_samples
assert sample_without_replacement(0, 0).shape == (0, )
assert sample_without_replacement(1, 1).shape == (1, )
# n_population >= n_samples
assert sample_without_replacement(5, 0).shape == (0, )
assert sample_without_replacement(5, 1).shape == (1, )
# n_population < 0 or n_samples < 0
with pytest.raises(ValueError):
sample_without_replacement(-1, 5)
with pytest.raises(ValueError):
sample_without_replacement(5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert len(s) == n_samples
unique = np.unique(s)
assert np.size(unique) == n_samples
assert np.all(unique < n_population)
# test edge case n_population == n_samples == 0
assert np.size(sample_without_replacement(0, 0)) == 0
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = comb(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilities = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = _random_choice_csc(n_samples, classes, class_probabilities,
random_state)
assert sp.issparse(got)
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilities[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilities = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = _random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert sp.issparse(got)
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilities[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilities = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = _random_choice_csc(n_samples, classes, class_probabilities,
random_state)
assert sp.issparse(got)
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilities[k])) / n_samples
assert_array_almost_equal(class_probabilities[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilities = [np.array([0.0, 1.0]), np.array([1.0])]
got = _random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert sp.issparse(got)
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilities[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilities is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilities = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
with pytest.raises(ValueError):
_random_choice_csc(4, classes, class_probabilities, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilities = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
with pytest.raises(ValueError):
_random_choice_csc(4, classes, class_probabilities, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilities = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
with pytest.raises(ValueError):
_random_choice_csc(4, classes, class_probabilities, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilities = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
with pytest.raises(ValueError):
_random_choice_csc(4, classes, class_probabilities, 1)
def test_our_rand_r():
assert 131541053 == _our_rand_r_py(1273642419)
assert 270369 == _our_rand_r_py(0)
| bsd-3-clause |
sheabrown/faraday_complexity | final/possum.py | 1 | 4428 | from simulate import *
import matplotlib.pyplot as plt
class possum(simulate):
"""
Class for creating polarization and
faraday rotation spectra.
Frequency Coverages:
_createWSRT()
Frequency range for the Westerbork
Synthesis Radio Telescope
310 - 380 MHz
_createASKAP12()
ASKAP12 frequency coverage
700 - 1300 MHz
1500 - 1800 MHz
_createASKAP36()
ASKAP36 frequency coverage
1130 - 1430 MHz
"""
def __init__(self):
self.__c = 2.99e+08 # speed of light in m/s
self.__mhz = 1.0e+06
def _createWSRT(self, *args):
"""
Create the WSRT frequency spectrum:
310 - 380 MHz
"""
self.nu_ = self._createFrequency(310., 380., nchan=400)
def _createASKAP12(self, *args):
"""
Create the ASKAP12 frequency range:
700 - 1300 MHz
1500 - 1800 MHz
To call:
_createASKAP12()
Parameters:
[None]
        Postcondition:
            self.nu_ is set to the ASKAP12 band frequencies in [Hz]
        """
band12 = self._createFrequency(700.,1300.,nchan=600)
band3 = self._createFrequency(1500.,1800.,nchan=300)
self.nu_ = np.concatenate((band12, band3))
def _createASKAP36(self, *args):
"""
Create the ASKAP36 frequency range:
1130 - 1430 MHZ
To call:
_createASKAP36()
Parameters:
[None]
        Postcondition:
            self.nu_ is set to the ASKAP36 band frequencies in [Hz]
        """
self.nu_ = self._createFrequency(1130., 1430., nchan=300)
def _createFrequency(self, numin=700., numax=1800., nchan=100., store=False):
"""
Creates an array of evenly spaced frequencies
numin and numax are in [MHz]
To call:
_createFrequency(numin, numax, nchan)
        Parameters:
            numin    lowest frequency [MHz]
            numax    highest frequency [MHz]
            nchan    number of channels
            store    if True, store the frequencies in self.nu_ instead of returning them
        Postcondition:
            Returns (or stores) an array of nchan evenly spaced frequencies in [Hz]
        """
# ======================================
# Convert MHz to Hz
# ======================================
numax = numax * self.__mhz
numin = numin * self.__mhz
# ======================================
# Generate an evenly spaced grid
# of frequencies and return
# ======================================
if store:
self.nu_ = np.arange(nchan)*(numax-numin)/(nchan-1) + numin
else:
return(np.arange(nchan)*(numax-numin)/(nchan-1) + numin)
def _createNspec(self, flux, depth, chi, sig=0):
"""
Function for generating N faraday spectra
and merging into one polarization spectrum.
To call:
createNspec(flux, depth, chi, sig)
Parameters:
flux [float, array]
depth [float, array]
chi [float, array]
sig [float, const]
"""
# ======================================
# Convert inputs to matrices
# ======================================
nu = np.asmatrix(self.nu_)
flux = np.asmatrix(flux).T
chi = np.asmatrix(chi).T
depth = np.asmatrix(depth).T
# ======================================
# Compute the polarization
# ======================================
P = flux.T * np.exp(2j * (chi + depth * np.square(self.__c / nu)))
P = np.ravel(P)
# ======================================
# Add Gaussian noise
# ======================================
if sig != 0:
P += self._addNoise(sig, P.size)
# ======================================
# Store the polarization
# ======================================
self.polarization_ = P
def _createFaradaySpectrum(self, philo=-250, phihi=250):
"""
Function for creating the Faraday spectrum
"""
F = []
phi = []
chiSq = np.mean( (self.__c / self.nu_)**2)
for far in range(philo, phihi+1):
phi.append(far)
temp = np.exp(-2j * far * ((self.__c / self.nu_)**2 - chiSq))
temp = np.sum( self.polarization_ * temp)
F.append(temp)
faraday = np.asarray(F) / len(self.nu_)
self.phi_ = np.asarray(phi)
self.faraday_ = faraday / np.abs(faraday).max()
def _addNoise(self, sigma, N):
"""
Function for adding real and
imaginary noise
To call:
_addNoise(sigma, N)
Parameters:
sigma
N
"""
noiseReal = np.random.normal(scale=sigma, size=N)
noiseImag = 1j * np.random.normal(scale=sigma, size=N)
return(noiseReal + noiseImag)
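# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Ties the methods above together without relying on the _simulateNspec() helper
# inherited from `simulate`: build the ASKAP36 band, synthesise a two-component
# polarized signal, and compute its Faraday spectrum. The flux/depth/chi values
# below are arbitrary example inputs, and the function name is hypothetical.
def example_two_component_spectrum():
    spec = possum()
    spec._createASKAP36()
    spec._createNspec(flux=[1.0, 0.5],      # component amplitudes
                      depth=[-20.0, 35.0],  # Faraday depths [rad m^-2]
                      chi=[0.0, 0.5],       # intrinsic polarization angles [rad]
                      sig=0.05)             # Gaussian noise level
    spec._createFaradaySpectrum(philo=-100, phihi=100)
    return spec.phi_, np.abs(spec.faraday_)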
# ======================================================
# Try to recreate figure 21 in Farnsworth et. al (2011)
#
# Haven't been able to get the large offset;
# peak appears between the two RM components
# ======================================================
if __name__ == '__main__':
spec = possum()
spec._simulateNspec(5)
plt.plot(spec.X_[1,:,0], 'r-', label='real')
plt.plot(spec.X_[1,:,1], 'b-', label='imag')
plt.plot(np.abs(spec.X_[1,:,0] + 1j*spec.X_[1,:,1]), 'k--', label='abs')
plt.legend(loc='best')
plt.show()
| mit |
wazeerzulfikar/scikit-learn | benchmarks/bench_sparsify.py | 50 | 3410 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, max_iter=2000,
tol=None)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
justincassidy/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
birdage/ooi-ui-services | ooiservices/app/uframe/plot_tools.py | 1 | 25044 | """
Plotting tools for OOI data
"""
from ooiservices.app import cache
from netCDF4 import num2date
import numpy as np
import prettyplotlib as ppl
from prettyplotlib import plt
from ooiservices.app.uframe.windrose import WindroseAxes
import matplotlib.dates as mdates
from matplotlib.ticker import FuncFormatter
import seawater as sw
from matplotlib.dates import datestr2num
axis_font_default = {'fontname': 'Calibri',
'size': '14',
'color': 'black',
'weight': 'bold',
'verticalalignment': 'bottom'}
title_font_default = {'fontname': 'Arial',
'size': '18',
'color': 'black',
'weight': 'bold',
'verticalalignment': 'bottom'}
class OOIPlots(object):
def get_time_label(self, ax, dates):
'''
Custom date axis formatting
'''
def format_func(x, pos=None):
x = mdates.num2date(x)
if pos == 0:
fmt = '%Y-%m-%d %H:%M'
else:
fmt = '%H:%M'
label = x.strftime(fmt)
return label
day_delta = (max(dates)-min(dates)).days
if day_delta < 1:
ax.xaxis.set_major_formatter(FuncFormatter(format_func))
else:
major = mdates.AutoDateLocator()
formt = mdates.AutoDateFormatter(major, defaultfmt=u'%Y-%m-%d')
formt.scaled[1.0] = '%Y-%m-%d'
formt.scaled[30] = '%Y-%m'
formt.scaled[1./24.] = '%Y-%m-%d %H:%M'
# formt.scaled[1./(24.*60.)] = FuncFormatter(format_func)
ax.xaxis.set_major_locator(major)
ax.xaxis.set_major_formatter(formt)
def plot_time_series(self, fig, is_timeseries, ax, x, y, fill=False, title='', xlabel='', ylabel='',
title_font={}, axis_font={}, tick_font={}, scatter=False, qaqc=[], events={}, **kwargs):
if not title_font:
title_font = title_font_default
if not axis_font:
axis_font = axis_font_default
if scatter:
ppl.scatter(ax, x, y, **kwargs)
else:
h = ppl.plot(ax, x, y, **kwargs)
if is_timeseries:
self.get_time_label(ax, x)
fig.autofmt_xdate()
else:
ax.set_xlabel(xlabel.replace("_", " "), **axis_font)
if ylabel:
ax.set_ylabel(ylabel.replace("_", " "), **axis_font)
if title:
ax.set_title(title.replace("_", " "), **title_font)
ax.grid(True)
if fill:
miny = min(ax.get_ylim())
if not scatter:
ax.fill_between(x, y, miny+1e-7, facecolor = h[0].get_color(), alpha=0.15)
else:
ax.fill_between(x, y, miny+1e-7, facecolor = axis_font_default['color'], alpha=0.15)
if events:
ylim = ax.get_ylim()
for event in events['events']:
time = datestr2num(event['start_date'])
x = np.array([time, time])
h = ax.plot(x, ylim, '--', label=event['class'])
legend = ax.legend()
if legend:
for label in legend.get_texts():
label.set_fontsize(10)
if len(qaqc) > 0:
bad_data = np.where(qaqc > 0)
h = ppl.plot(ax, x[bad_data], y[bad_data],
marker='o',
mfc='none',
linestyle='None',
markersize=6,
markeredgewidth=2,
mec='r')
# plt.tick_params(axis='both', which='major', labelsize=10)
if tick_font:
ax.tick_params(**tick_font)
plt.tight_layout()
def plot_stacked_time_series(self, fig, ax, x, y, z, title='', ylabel='',
cbar_title='', title_font={}, axis_font={}, tick_font = {},
**kwargs):
if not title_font:
title_font = title_font_default
if not axis_font:
axis_font = axis_font_default
z = np.ma.array(z, mask=np.isnan(z))
h = plt.pcolormesh(x, y, z, shading='gouraud', **kwargs)
# h = plt.pcolormesh(x, y, z, **kwargs)
if ylabel:
ax.set_ylabel(ylabel.replace("_", " "), **axis_font)
if title:
ax.set_title(title.replace("_", " "), **title_font)
plt.axis([x.min(), x.max(), y.min(), y.max()])
ax.xaxis_date()
date_list = mdates.num2date(x)
self.get_time_label(ax, date_list)
fig.autofmt_xdate()
ax.invert_yaxis()
cbar = plt.colorbar(h)
if cbar_title:
cbar.ax.set_ylabel(cbar_title)
ax.grid(True)
if tick_font:
ax.tick_params(**tick_font)
plt.tight_layout()
def plot_profile(self, fig, ax, x, y, xlabel='', ylabel='',
axis_font={}, tick_font={}, scatter=False, **kwargs):
if not axis_font:
axis_font = axis_font_default
if scatter:
ppl.scatter(ax, x, y, **kwargs)
else:
ppl.plot(ax, x, y, **kwargs)
if xlabel:
ax.set_xlabel(xlabel.replace("_", " "), labelpad=5, **axis_font)
if ylabel:
ax.set_ylabel(ylabel.replace("_", " "), labelpad=11, **axis_font)
if tick_font:
ax.tick_params(**tick_font)
ax.xaxis.set_label_position('top') # this moves the label to the top
ax.xaxis.set_ticks_position('top')
ax.xaxis.get_major_locator()._nbins = 5
ax.grid(True)
plt.tight_layout()
# ax.set_title(title, **title_font)
def plot_histogram(self, ax, x, bins, title='', xlabel='', title_font={},
axis_font={}, tick_font={}, **kwargs):
if not title_font:
title_font = title_font_default
if not axis_font:
axis_font = axis_font_default
x = x[~np.isnan(x)]
ppl.hist(ax, x, bins, grid='y', **kwargs)
if xlabel:
ax.set_xlabel(xlabel.replace("_", " "), labelpad=10, **axis_font)
if tick_font:
ax.tick_params(**tick_font)
ax.set_ylabel('No. of Occurrences', **axis_font)
ax.set_title(title.replace("_", " "), **title_font)
# ax.grid(True)
# A quick way to create new windrose axes...
def new_axes(self, figsize):
fig = plt.figure(figsize=(figsize, figsize), facecolor='w', edgecolor='w')
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect, axisbg='w')
fig.add_axes(ax)
return fig, ax
def set_legend(self, ax, label='', fontsize=8):
"""Adjust the legend box."""
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.7, box.height])
# Put a legend to the right of the current axis
l = ax.legend(title=label, loc='lower left', bbox_to_anchor=(1, 0))
plt.setp(l.get_texts(), fontsize=fontsize)
def plot_rose(self, magnitude, direction, bins=8, nsector=16, figsize=8,
title='', title_font={}, legend_title='', normed=True,
opening=0.8, edgecolor='white', fontsize=8):
if not title_font:
title_font = title_font_default
fig, ax = self.new_axes(figsize)
magnitude = magnitude[~np.isnan(magnitude)]
direction = direction[~np.isnan(direction)]
cmap = plt.cm.rainbow
ax.bar(direction, magnitude, bins=bins, normed=normed, cmap=cmap,
opening=opening, edgecolor=edgecolor, nsector=nsector)
self.set_legend(ax, legend_title, fontsize)
ax.set_title(title.replace("_", " "), **title_font)
return fig
def plot_1d_quiver(self, fig, ax, time, u, v, title='', ylabel='',
title_font={}, axis_font={}, tick_font={},
legend_title="Magnitude", start=None, end=None, **kwargs):
if not title_font:
title_font = title_font_default
if not axis_font:
axis_font = axis_font_default
# Plot quiver
magnitude = (u**2 + v**2)**0.5
maxmag = max(magnitude)
ax.set_ylim(-maxmag, maxmag)
dx = time[-1] - time[0]
if start and end:
ax.set_xlim(start - 0.05 * dx, end + 0.05 * dx)
else:
ax.set_xlim(time[0] - 0.05 * dx, time[-1] + 0.05 * dx)
# ax.fill_between(time, magnitude, 0, color='k', alpha=0.1)
# # Fake 'box' to be able to insert a legend for 'Magnitude'
# p = ax.add_patch(plt.Rectangle((1, 1), 1, 1, fc='k', alpha=0.1))
# leg1 = ax.legend([p], [legend_title], loc='lower right')
# leg1._drawFrame = False
# # 1D Quiver plot
q = ax.quiver(time, 0, u, v, **kwargs)
plt.quiverkey(q, 0.2, 0.05, 0.2,
r'$0.2 \frac{m}{s}$',
labelpos='W',
fontproperties={'weight': 'bold'})
ax.xaxis_date()
date_list = mdates.num2date(time)
self.get_time_label(ax, date_list)
fig.autofmt_xdate()
if ylabel:
ax.set_ylabel(ylabel.replace("_", " "), labelpad=20, **axis_font)
if tick_font:
ax.tick_params(**tick_font)
ax.set_title(title.replace("_", " "), **title_font)
plt.tight_layout()
def make_patch_spines_invisible(self, ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.itervalues():
sp.set_visible(False)
def set_spine_direction(self, ax, direction):
if direction in ["right", "left"]:
ax.yaxis.set_ticks_position(direction)
ax.yaxis.set_label_position(direction)
elif direction in ["top", "bottom"]:
ax.xaxis.set_ticks_position(direction)
ax.xaxis.set_label_position(direction)
else:
raise ValueError("Unknown Direction: %s" % (direction,))
ax.spines[direction].set_visible(True)
def plot_multiple_xaxes(self, ax, xdata, ydata, colors, ylabel='Depth (m)', title='', title_font={},
axis_font={}, tick_font={}, width_in=8.3, **kwargs):
# Acknowledgment: This function is based on code written by Jae-Joon Lee,
# URL= http://matplotlib.svn.sourceforge.net/viewvc/matplotlib/trunk/matplotlib/
# examples/pylab_examples/multiple_yaxis_with_spines.py?revision=7908&view=markup
if not title_font:
title_font = title_font_default
if not axis_font:
axis_font = axis_font_default
n_vars = len(xdata)
if n_vars > 6:
raise Exception('This code currently handles a maximum of 6 independent variables.')
elif n_vars < 2:
raise Exception('This code currently handles a minimum of 2 independent variables.')
# Generate the plot.
# Use twiny() to create extra axes for all dependent variables except the first
# (we get the first as part of the ax axes).
x_axis = n_vars * [0]
x_axis[0] = ax
for i in range(1, n_vars):
x_axis[i] = ax.twiny()
ax.spines["top"].set_visible(False)
self.make_patch_spines_invisible(x_axis[1])
self.set_spine_direction(x_axis[1], "top")
offset = [1.10, -0.1, -0.20, 1.20]
spine_directions = ["top", "bottom", "bottom", "top", "top", "bottom"]
count = 0
for i in range(2, n_vars):
x_axis[i].spines[spine_directions[count]].set_position(("axes", offset[count]))
self.make_patch_spines_invisible(x_axis[i])
self.set_spine_direction(x_axis[i], spine_directions[count])
count += 1
# Adjust the axes left/right accordingly
if n_vars >= 4:
plt.subplots_adjust(bottom=0.2, top=0.8)
elif n_vars == 3:
plt.subplots_adjust(bottom=0.0, top=0.8)
# Label the y-axis:
ax.set_ylabel(ylabel, **axis_font)
for ind, key in enumerate(xdata):
x_axis[ind].plot(xdata[key], ydata, colors[ind], **kwargs)
# Label the x-axis and set text color:
x_axis[ind].set_xlabel(key.replace("_", " "), **axis_font)
x_axis[ind].xaxis.label.set_color(colors[ind])
x_axis[ind].spines[spine_directions[ind]].set_color(colors[ind])
for obj in x_axis[ind].xaxis.get_ticklines():
# `obj` is a matplotlib.lines.Line2D instance
obj.set_color(colors[ind])
obj.set_markeredgewidth(2)
for obj in x_axis[ind].xaxis.get_ticklabels():
obj.set_color(colors[ind])
ax.invert_yaxis()
ax.grid(True)
if tick_font:
ax.tick_params(**tick_font)
ax.set_title(title.replace("_", " "), y=1.23, **title_font)
plt.tight_layout()
def plot_multiple_yaxes(self, fig, ax, xdata, ydata, colors, title, units=[], scatter=False,
axis_font={}, title_font={}, tick_font={}, width_in=8.3, qaqc={}, **kwargs):
# Plot a timeseries with multiple y-axes
#
# ydata is a python dictionary of all the data to plot. Key values are used as plot labels
#
# Acknowledgment: This function is based on code written by Jae-Joon Lee,
# URL= http://matplotlib.svn.sourceforge.net/viewvc/matplotlib/trunk/matplotlib/
# examples/pylab_examples/multiple_yaxis_with_spines.py?revision=7908&view=markup
#
# http://matplotlib.org/examples/axes_grid/demo_parasite_axes2.html
if not axis_font:
axis_font = axis_font_default
if not title_font:
title_font = title_font_default
n_vars = len(ydata)
if n_vars > 6:
raise Exception('This code currently handles a maximum of 6 independent variables.')
elif n_vars < 2:
raise Exception('This code currently handles a minimum of 2 independent variables.')
if scatter:
kwargs['marker'] = 'o'
# Generate the plot.
# Use twinx() to create extra axes for all dependent variables except the first
# (we get the first as part of the ax axes).
y_axis = n_vars * [0]
y_axis[0] = ax
for i in range(1, n_vars):
y_axis[i] = ax.twinx()
ax.spines["top"].set_visible(False)
self.make_patch_spines_invisible(y_axis[1])
self.set_spine_direction(y_axis[1], "top")
# Define the axes position offsets for each 'extra' axis
spine_directions = ["left", "right", "left", "right", "left", "right"]
# Adjust the axes left/right accordingly
if n_vars >= 4:
if width_in < 8.3:
# set axis location
offset = [1.2, -0.2, 1.40, -0.40]
# overwrite width
l_mod = 0.3
r_mod = 0.8
else:
offset = [1.10, -0.10, 1.20, -0.20]
l_mod = 0.5
r_mod = 1.2
plt.subplots_adjust(left=l_mod, right=r_mod)
elif n_vars == 3:
offset = [1.20, -0.20, 1.40, -0.40]
plt.subplots_adjust(left=0.0, right=0.7)
count = 0
for i in range(2, n_vars):
y_axis[i].spines[spine_directions[count+1]].set_position(("axes", offset[count]))
self.make_patch_spines_invisible(y_axis[i])
self.set_spine_direction(y_axis[i], spine_directions[count+1])
count += 1
# Plot the data
for ind, key in enumerate(ydata):
y_axis[ind].plot(xdata[key], ydata[key], colors[ind], **kwargs)
if len(qaqc[key]) > 0:
bad_data = np.where(qaqc[key] > 0)
y_axis[ind].plot(xdata[key][bad_data], ydata[key][bad_data],
marker='o',
mfc='none',
linestyle='None',
markersize=6,
markeredgewidth=2,
mec='r')
# Label the y-axis and set text color:
# Been experimenting with other ways to handle tick labels with spines
y_axis[ind].yaxis.get_major_formatter().set_useOffset(False)
y_axis[ind].set_ylabel(key.replace("_", " ") + ' (' + units[ind] + ')', labelpad=10, **axis_font)
y_axis[ind].yaxis.label.set_color(colors[ind])
y_axis[ind].spines[spine_directions[ind]].set_color(colors[ind])
if tick_font:
labelsize = tick_font['labelsize']
y_axis[ind].tick_params(axis='y', labelsize=labelsize, colors=colors[ind])
self.get_time_label(ax, xdata['time'])
fig.autofmt_xdate()
# ax.tick_params(axis='x', labelsize=10)
ax.set_title(title.replace("_", " "), y=1.05, **title_font)
ax.grid(True)
plt.tight_layout()
def plot_multiple_streams(self, fig, ax, datasets, colors, axis_font={}, title_font={},
tick_font={}, width_in=8.3 , plot_qaqc=0, scatter=False, **kwargs):
# Plot a timeseries with multiple y-axes using multiple streams from uFrame
#
# Acknowledgment: This function is based on code written by Jae-Joon Lee,
# URL= http://matplotlib.svn.sourceforge.net/viewvc/matplotlib/trunk/matplotlib/
# examples/pylab_examples/multiple_yaxis_with_spines.py?revision=7908&view=markup
#
# http://matplotlib.org/examples/axes_grid/demo_parasite_axes2.html
if not axis_font:
axis_font = axis_font_default
if not title_font:
title_font = title_font_default
n_vars = len(datasets)
if n_vars > 6:
raise Exception('This code currently handles a maximum of 6 independent variables.')
elif n_vars < 2:
raise Exception('This code currently handles a minimum of 2 independent variables.')
if scatter:
kwargs['marker'] = 'o'
# Generate the plot.
# Use twinx() to create extra axes for all dependent variables except the first
# (we get the first as part of the ax axes).
y_axis = n_vars * [0]
y_axis[0] = ax
for i in range(1, n_vars):
y_axis[i] = ax.twinx()
ax.spines["top"].set_visible(False)
self.make_patch_spines_invisible(y_axis[1])
self.set_spine_direction(y_axis[1], "top")
# Define the axes position offsets for each 'extra' axis
spine_directions = ["left", "right", "left", "right", "left", "right"]
# Adjust the axes left/right accordingly
if n_vars >= 4:
if width_in < 8.3:
# set axis location
offset = [1.2, -0.2, 1.40, -0.40]
# overwrite width
l_mod = 0.3
r_mod = 0.8
else:
offset = [1.10, -0.10, 1.20, -0.20]
l_mod = 0.5
r_mod = 1.2
plt.subplots_adjust(left=l_mod, right=r_mod)
elif n_vars == 3:
offset = [1.20, -0.20, 1.40, -0.40]
plt.subplots_adjust(left=0.0, right=0.7)
count = 0
for i in range(2, n_vars):
y_axis[i].spines[spine_directions[count+1]].set_position(("axes", offset[count]))
self.make_patch_spines_invisible(y_axis[i])
self.set_spine_direction(y_axis[i], spine_directions[count+1])
count += 1
# Plot the data
legend_handles = []
legend_labels = []
for ind, data in enumerate(datasets):
xlabel = data['x_field'][0]
ylabel = data['y_field'][0]
xdata = data['x'][xlabel]
ydata = data['y'][ylabel]
# Handle the QAQC data
qaqc = data['qaqc'][ylabel]
if plot_qaqc >= 10:
# Plot all of the qaqc flags results
# qaqc_data = data['qaqc'][ylabel]
pass
elif plot_qaqc >= 1:
# This is a case where the user wants to plot just one of the 9 QAQC tests
                # use a separate name so the enumerate() index `ind` is not clobbered
                flag_idx = np.where(qaqc != plot_qaqc)
                qaqc[flag_idx] = 0
else:
qaqc = []
h, = y_axis[ind].plot(xdata, ydata, colors[ind], label=data['title'], **kwargs)
if len(qaqc) > 0:
bad_data = np.where(qaqc > 0)
y_axis[ind].plot(xdata[bad_data], ydata[bad_data],
marker='o',
mfc='none',
linestyle='None',
markersize=6,
markeredgewidth=2,
mec='r')
# Label the y-axis and set text color:
# Been experimenting with other ways to handle tick labels with spines
y_axis[ind].yaxis.get_major_formatter().set_useOffset(False)
y_axis[ind].set_ylabel(ylabel.replace("_", " "), labelpad=10, **axis_font)
y_axis[ind].yaxis.label.set_color(colors[ind])
y_axis[ind].spines[spine_directions[ind]].set_color(colors[ind])
if tick_font:
labelsize = tick_font['labelsize']
y_axis[ind].tick_params(axis='y', labelsize=labelsize, colors=colors[ind])
legend_handles.append(h)
legend_labels.append(data['title'][0:20])
self.get_time_label(ax, xdata)
fig.autofmt_xdate()
ax.legend(legend_handles, legend_labels)
# ax.tick_params(axis='x', labelsize=10)
# ax.set_title(title.replace("_", " "), y=1.05, **title_font)
ax.grid(True)
plt.tight_layout()
def plot_ts_diagram(self, ax, sal, temp, xlabel='Salinity', ylabel='Temperature', title='',
axis_font={}, title_font={}, tick_font={}, **kwargs):
if not axis_font:
axis_font = axis_font_default
if not title_font:
title_font = title_font_default
sal = np.ma.array(sal, mask=np.isnan(sal))
temp = np.ma.array(temp, mask=np.isnan(temp))
if len(sal) != len(temp):
raise Exception('Sal and Temp arrays are not the same size!')
# Figure out boudaries (mins and maxs)
smin = sal.min() - (0.01 * sal.min())
smax = sal.max() + (0.01 * sal.max())
tmin = temp.min() - (0.1 * temp.max())
tmax = temp.max() + (0.1 * temp.max())
# Calculate how many gridcells we need in the x and y dimensions
        xdim = int(round((smax - smin) / 0.1 + 1, 0))
        ydim = int(round((tmax - tmin) + 1, 0))
# Create empty grid of zeros
dens = np.zeros((ydim, xdim))
# Create temp and sal vectors of appropiate dimensions
ti = np.linspace(1, ydim-1, ydim)+tmin
si = np.linspace(1, xdim-1, xdim)*0.1+smin
# Loop to fill in grid with densities
for j in range(0, int(ydim)):
for i in range(0, int(xdim)):
dens[j, i] = sw.dens(si[i], ti[j], 0)
# Substract 1000 to convert to sigma-t
dens = dens - 1000
# Plot data
cs = plt.contour(si, ti, dens, linestyles='dashed', colors='k')
plt.clabel(cs, fontsize=12, inline=1, fmt='%1.0f') # Label every second level
ppl.scatter(ax, sal, temp, **kwargs)
ax.set_xlabel(xlabel.replace("_", " "), labelpad=10, **axis_font)
ax.set_ylabel(ylabel.replace("_", " "), labelpad=10, **axis_font)
ax.set_title(title.replace("_", " "), **title_font)
ax.set_aspect(1./ax.get_data_ratio()) # make axes square
if tick_font:
ax.tick_params(**tick_font)
plt.tight_layout()
def plot_3d_scatter(self, fig, ax, x, y, z, title='', xlabel='', ylabel='', zlabel='',
title_font={}, axis_font={}, tick_font={}):
if not title_font:
title_font = title_font_default
if not axis_font:
axis_font = axis_font_default
cmap = plt.cm.jet
h = plt.scatter(x, y, c=z, cmap=cmap)
ax.set_aspect(1./ax.get_data_ratio()) # make axes square
cbar = plt.colorbar(h, orientation='vertical', aspect=30, shrink=0.9)
if xlabel:
ax.set_xlabel(xlabel.replace("_", " "), labelpad=10, **axis_font)
if ylabel:
ax.set_ylabel(ylabel.replace("_", " "), labelpad=10, **axis_font)
if zlabel:
cbar.ax.set_ylabel(zlabel.replace("_", " "), labelpad=10, **axis_font)
if tick_font:
ax.tick_params(**tick_font)
if title:
ax.set_title(title.replace("_", " "), **title_font)
ax.grid(True)
plt.tight_layout()
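# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Minimal example of driving plot_time_series() with synthetic data; the figure and
# axes handling mirrors what a calling service layer would be expected to provide.
# The function name and sample values are hypothetical.
def _example_time_series_plot():
    import datetime
    times = np.array([datetime.datetime(2015, 1, 1) + datetime.timedelta(hours=h)
                      for h in range(48)])
    values = np.sin(np.linspace(0, 4 * np.pi, times.size))
    fig, ax = plt.subplots()
    OOIPlots().plot_time_series(fig, True, ax, times, values,
                                title='Example time series', ylabel='Amplitude',
                                qaqc=np.array([]), events={})
    return fig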
| apache-2.0 |
jpautom/scikit-learn | sklearn/cluster/dbscan_.py | 19 | 11713 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
# Joel Nothman <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, sample_weight=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While True, it's useless information)
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.calculate_distance for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
.. versionadded:: 0.17
metric *precomputed* to accept precomputed sparse matrix.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
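# --- Illustrative usage sketch (added for clarity; not part of the upstream module) ---
# Exercises both entry points defined above on a toy dataset: two well-separated
# blobs plus one isolated point, which should come out labelled -1 (noise).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.randn(20, 2) + [5, 5],
                        rng.randn(20, 2) - [5, 5],
                        [[0.0, 0.0]]])
    core_idx, demo_labels = dbscan(X_demo, eps=1.5, min_samples=5)
    est = DBSCAN(eps=1.5, min_samples=5).fit(X_demo)
    assert (demo_labels == est.labels_).all()  # function and estimator APIs agree
    print("cluster labels found: %s" % np.unique(demo_labels))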
| bsd-3-clause |
PatrickOReilly/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 65 | 5529 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative definition (from ESLII)
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
        sw_out = sw_init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float64)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float64)
y.fill(0.0)
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float64)
y.fill(1.0)
sw = np.ones(102, dtype=np.float64)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
wilselby/diy_driverless_car_ROS | rover_ml/utils/rosbag_to_csv.py | 1 | 8697 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#https://github.com/rwightman/udacity-driving-reader/blob/master/script/bagdump.py
from __future__ import print_function
from cv_bridge import CvBridge, CvBridgeError
from collections import defaultdict
import os
import sys
import cv2
import imghdr
import argparse
import functools
import numpy as np
import pandas as pd
from bagutils import *
def get_outdir(base_dir, name):
outdir = os.path.join(base_dir, name)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def check_format(data):
img_fmt = imghdr.what(None, h=data)
return 'jpg' if img_fmt == 'jpeg' else img_fmt
def write_image(bridge, outdir, msg, fmt='png'):
results = {}
image_filename = os.path.join(outdir, str(msg.header.stamp.to_nsec()) + '.' + fmt)
try:
if hasattr(msg, 'format') and 'compressed' in msg.format:
buf = np.ndarray(shape=(1, len(msg.data)), dtype=np.uint8, buffer=msg.data)
cv_image = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR)
if cv_image.shape[2] != 3:
print("Invalid image %s" % image_filename)
return results
results['height'] = cv_image.shape[0]
results['width'] = cv_image.shape[1]
# Avoid re-encoding if we don't have to
if check_format(msg.data) == fmt:
buf.tofile(image_filename)
else:
cv2.imwrite(image_filename, cv_image)
else:
cv_image = bridge.imgmsg_to_cv2(msg, "bgr8")
cv2.imwrite(image_filename, cv_image)
except CvBridgeError as e:
print(e)
results['filename'] = image_filename
return results
#sensor_msgs/Image
def camera2dict(msg, write_results, camera_dict):
camera_dict["timestamp"].append(msg.header.stamp.to_nsec())
camera_dict["width"].append(write_results['width'] if 'width' in write_results else msg.width)
camera_dict['height'].append(write_results['height'] if 'height' in write_results else msg.height)
camera_dict["frame_id"].append(msg.header.frame_id)
camera_dict["filename"].append(write_results['filename'])
#geometry_msgs/TwistStamped
def steering2dict(msg, steering_dict):
steering_dict["timestamp"].append(msg.header.stamp.to_nsec())
steering_dict["angle"].append(msg.twist.angular.z)
steering_dict["speed"].append(msg.twist.linear.x)
#ackermann_msgs/AckermannDriveStamped
def steering2dict_ack(msg, steering_dict):
steering_dict["timestamp"].append(msg.header.stamp.to_nsec())
steering_dict["angle"].append(msg.drive.steering_angle)
steering_dict["speed"].append(msg.drive.speed)
def camera_select(topic, select_from):
if topic.startswith('/l'):
return select_from[0]
elif topic.startswith('/c'):
return select_from[1]
elif topic.startswith('/r'):
return select_from[2]
else:
assert False, "Unexpected topic"
def main():
parser = argparse.ArgumentParser(description='Convert rosbag to images and csv.')
parser.add_argument('-o', '--outdir', type=str, nargs='?', default='/output',
help='Output folder')
parser.add_argument('-i', '--indir', type=str, nargs='?', default='/data',
help='Input folder where bagfiles are located')
parser.add_argument('-f', '--img_format', type=str, nargs='?', default='jpg',
help='Image encode format, png or jpg')
parser.add_argument('-m', dest='msg_only', action='store_true', help='Messages only, no images')
parser.add_argument('-d', dest='debug', action='store_true', help='Debug print enable')
parser.set_defaults(msg_only=False)
parser.set_defaults(debug=False)
args = parser.parse_args()
img_format = args.img_format
base_outdir = args.outdir
indir = args.indir
msg_only = args.msg_only
debug_print = args.debug
bridge = CvBridge()
include_images = False if msg_only else True
include_others = False
filter_topics = [STEERING_TOPIC]
if include_images:
filter_topics += CAMERA_TOPICS
if include_others:
filter_topics += OTHER_TOPICS
print(filter_topics)
bagsets = find_bagsets(indir, filter_topics=filter_topics)
for bs in bagsets:
print("Processing set %s" % bs.name)
sys.stdout.flush()
dataset_outdir = os.path.join(base_outdir, "%s" % bs.name)
center_outdir = get_outdir(dataset_outdir, "center")
camera_cols = ["timestamp", "width", "height", "frame_id", "filename"]
camera_dict = defaultdict(list)
steering_cols = ["timestamp", "angle", "speed"]
steering_dict = defaultdict(list)
bs.write_infos(dataset_outdir)
readers = bs.get_readers()
stats_acc = defaultdict(int)
def _process_msg(topic, msg, stats):
timestamp = msg.header.stamp.to_nsec()
if topic in CAMERA_TOPICS:
outdir = center_outdir #camera_select(topic, (left_outdir, center_outdir, right_outdir))
if debug_print:
print("%s_camera %d" % (topic[1], timestamp))
results = write_image(bridge, outdir, msg, fmt=img_format)
results['filename'] = os.path.relpath(results['filename'], dataset_outdir)
camera2dict(msg, results, camera_dict)
stats['img_count'] += 1
stats['msg_count'] += 1
elif topic == STEERING_TOPIC:
if debug_print:
print("steering %d %f" % (timestamp, msg.drive.steering_angle))
steering2dict_ack(msg, steering_dict)
stats['msg_count'] += 1
# no need to cycle through readers in any order for dumping, rip through each on in sequence
for reader in readers:
for result in reader.read_messages():
_process_msg(*result, stats=stats_acc)
if ((stats_acc['img_count'] and stats_acc['img_count'] % 1000 == 0) or
(stats_acc['msg_count'] and stats_acc['msg_count'] % 10000 == 0)):
print("%d images, %d messages processed..." %
(stats_acc['img_count'], stats_acc['msg_count']))
sys.stdout.flush()
print("Writing done. %d images, %d messages processed." %
(stats_acc['img_count'], stats_acc['msg_count']))
sys.stdout.flush()
if include_images:
camera_csv_path = os.path.join(dataset_outdir, 'camera.csv')
camera_df = pd.DataFrame(data=camera_dict, columns=camera_cols)
camera_df.to_csv(camera_csv_path, index=False)
steering_csv_path = os.path.join(dataset_outdir, 'steering.csv')
steering_df = pd.DataFrame(data=steering_dict, columns=steering_cols)
steering_df.to_csv(steering_csv_path, index=False)
gen_interpolated = True
if include_images and gen_interpolated:
# A little pandas magic to interpolate steering/gps samples to camera frames
camera_df['timestamp'] = pd.to_datetime(camera_df['timestamp'])
camera_df.set_index(['timestamp'], inplace=True)
camera_df.index.rename('index', inplace=True)
steering_df['timestamp'] = pd.to_datetime(steering_df['timestamp'])
steering_df.set_index(['timestamp'], inplace=True)
steering_df.index.rename('index', inplace=True)
merged = functools.reduce(lambda left, right: pd.merge(
left, right, how='outer', left_index=True, right_index=True), [camera_df, steering_df])
merged.interpolate(method='time', inplace=True)
filtered_cols = ['timestamp', 'width', 'height', 'frame_id', 'filename',
'angle', 'speed']
filtered = merged.loc[camera_df.index] # back to only camera rows
filtered.fillna(0.0, inplace=True)
filtered['timestamp'] = filtered.index.astype('int') # add back original timestamp integer col
filtered['width'] = filtered['width'].astype('int') # cast back to int
filtered['height'] = filtered['height'].astype('int') # cast back to int
filtered = filtered[filtered_cols] # filter and reorder columns for final output
interpolated_csv_path = os.path.join(dataset_outdir, 'interpolated.csv')
filtered.to_csv(interpolated_csv_path, header=True)
print("Done")
if __name__ == '__main__':
main()
| bsd-2-clause |
tammoippen/nest-simulator | topology/examples/test_3d_exp.py | 13 | 2956 | # -*- coding: utf-8 -*-
#
# test_3d_exp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module
EXPERIMENTAL example of 3d layer.
3d layers are currently not supported, use at your own risk!
Hans Ekkehard Plesser, UMB
This example uses the function GetChildren, which is deprecated. A deprecation
warning is therefore issued. For details about deprecated functions, see
documentation.
'''
import nest
import pylab
import random
import nest.topology as topo
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
pylab.ion()
nest.ResetKernel()
# generate list of 1000 (x,y,z) triplets
pos = [[random.uniform(-0.5, 0.5), random.uniform(-0.5, 0.5),
random.uniform(-0.5, 0.5)]
for j in range(1000)]
l1 = topo.CreateLayer(
{'extent': [1.5, 1.5, 1.5], # must specify 3d extent AND center
'center': [0., 0., 0.],
'positions': pos,
'elements': 'iaf_psc_alpha'})
# visualize
# xext, yext = nest.GetStatus(l1, 'topology')[0]['extent']
# xctr, yctr = nest.GetStatus(l1, 'topology')[0]['center']
# l1_children is a work-around until NEST 3.0 is released
l1_children = nest.GetChildren(l1)[0]
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*topo.GetPosition(l1_children))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b', edgecolor='none')
# Exponential connections in full volume [-0.75,0.75]**3
topo.ConnectLayers(l1, l1,
{'connection_type': 'divergent', 'allow_autapses': False,
'mask': {'volume': {'lower_left': [-0.75, -0.75, -0.75],
'upper_right': [0.75, 0.75, 0.75]}},
'kernel': {'exponential':
{'c': 0., 'a': 1., 'tau': 0.25}}})
# show connections from center element
# sender shown in red, targets in green
ctr = topo.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*topo.GetTargetPositions(ctr, l1)[0])
xctr, yctr, zctr = topo.GetPosition(ctr)[0]
ax.scatter([xctr], [yctr], [zctr], s=40, facecolor='r', edgecolor='none')
ax.scatter(xtgt, ytgt, ztgt, s=40, facecolor='g', edgecolor='g')
tgts = topo.GetTargetNodes(ctr, l1)[0]
d = topo.Distance(ctr, tgts)
plt.figure()
plt.hist(d, 25)
# plt.show()
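# --- Illustrative sketch (not part of the original example) ------------------
# Hedged helper, pure numpy/matplotlib (no NEST required): the kernel above is
# assumed here to follow the topology convention p(d) = c + a * exp(-d / tau).
# With c=0, a=1, tau=0.25 the acceptance probability is exp(-3) ~ 5% at
# d = 0.75, which is why the target-distance histogram is concentrated near
# the sender.
def plot_exponential_kernel(c=0.0, a=1.0, tau=0.25):
    import numpy as np
    d = np.linspace(0.0, 1.3, 200)  # distance range to plot
    plt.figure()
    plt.plot(d, c + a * np.exp(-d / tau))
    plt.xlabel('distance d')
    plt.ylabel('connection probability p(d)')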
| gpl-2.0 |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Final/best_kNN_PCA/4-categories/24/test11_cross_validate_categories_24_1200ms.py | 1 | 4733 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/24')
from data_24 import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
    # Recall that the cumulative sum of the eigenvalues shows the level of variance accounted for by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
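# --- Illustrative sketch (not part of the original script) -------------------
# Hedged, modern alternative to the PyMVPA pipeline above, using scikit-learn
# only (assumes sklearn >= 0.18 for model_selection). It expects a
# (samples x features) matrix X and a label vector y; the function name and
# parameters are invented for the demo, and the CV splitting differs from the
# leave-one-chunk-out scheme used by my_mvpa() above.
def sklearn_pca_knn_accuracy(X, y, n_components=10, k=3):
    from sklearn.decomposition import PCA
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.pipeline import make_pipeline
    from sklearn.model_selection import cross_val_score
    pipe = make_pipeline(PCA(n_components=n_components),
                         KNeighborsClassifier(n_neighbors=k))
    return cross_val_score(pipe, X, y, cv=5).mean()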
| mit |
nathanielvarona/airflow | tests/test_utils/perf/scheduler_ops_metrics.py | 3 | 7511 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import sys
import pandas as pd
from airflow import settings
from airflow.configuration import conf
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.models import DagBag, DagModel, DagRun, TaskInstance
from airflow.utils import timezone
from airflow.utils.state import State
SUBDIR = 'scripts/perf/dags'
DAG_IDS = ['perf_dag_1', 'perf_dag_2']
MAX_RUNTIME_SECS = 6
class SchedulerMetricsJob(SchedulerJob):
"""
This class extends SchedulerJob to instrument the execution performance of
task instances contained in each DAG. We want to know if any DAG
is starved of resources, and this will be reflected in the stats printed
out at the end of the test run. The following metrics will be instrumented
for each task instance (dag_id, task_id, execution_date) tuple:
1. Queuing delay - time taken from starting the executor to the task
instance to be added to the executor queue.
2. Start delay - time taken from starting the executor to the task instance
to start execution.
3. Land time - time taken from starting the executor to task instance
completion.
4. Duration - time taken for executing the task instance.
The DAGs implement bash operators that call the system wait command. This
is representative of typical operators run on Airflow - queries that are
run on remote systems and spend the majority of their time on I/O wait.
To Run:
$ python scripts/perf/scheduler_ops_metrics.py [timeout]
You can specify timeout in seconds as an optional parameter.
Its default value is 6 seconds.
"""
__mapper_args__ = {'polymorphic_identity': 'SchedulerMetricsJob'}
def __init__(self, dag_ids, subdir, max_runtime_secs):
self.max_runtime_secs = max_runtime_secs
super().__init__(dag_ids=dag_ids, subdir=subdir)
def print_stats(self):
"""
Print operational metrics for the scheduler test.
"""
session = settings.Session()
TI = TaskInstance
tis = session.query(TI).filter(TI.dag_id.in_(DAG_IDS)).all()
successful_tis = [x for x in tis if x.state == State.SUCCESS]
ti_perf = [
(
ti.dag_id,
ti.task_id,
ti.execution_date,
(ti.queued_dttm - self.start_date).total_seconds(),
(ti.start_date - self.start_date).total_seconds(),
(ti.end_date - self.start_date).total_seconds(),
ti.duration,
)
for ti in successful_tis
]
ti_perf_df = pd.DataFrame(
ti_perf,
columns=[
'dag_id',
'task_id',
'execution_date',
'queue_delay',
'start_delay',
'land_time',
'duration',
],
)
print('Performance Results')
print('###################')
for dag_id in DAG_IDS:
print(f'DAG {dag_id}')
print(ti_perf_df[ti_perf_df['dag_id'] == dag_id])
print('###################')
if len(tis) > len(successful_tis):
print("WARNING!! The following task instances haven't completed")
print(
pd.DataFrame(
[
(ti.dag_id, ti.task_id, ti.execution_date, ti.state)
for ti in filter(lambda x: x.state != State.SUCCESS, tis)
],
columns=['dag_id', 'task_id', 'execution_date', 'state'],
)
)
session.commit()
def heartbeat(self):
"""
Override the scheduler heartbeat to determine when the test is complete
"""
super().heartbeat()
session = settings.Session()
# Get all the relevant task instances
TI = TaskInstance
successful_tis = (
session.query(TI).filter(TI.dag_id.in_(DAG_IDS)).filter(TI.state.in_([State.SUCCESS])).all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
        # the tasks in perf_dag_1 and perf_dag_2 have a daily schedule interval.
num_task_instances = sum(
(timezone.utcnow() - task.start_date).days for dag in dags for task in dag.tasks
)
if (
len(successful_tis) == num_task_instances
or (timezone.utcnow() - self.start_date).total_seconds() > self.max_runtime_secs
):
if len(successful_tis) == num_task_instances:
self.log.info("All tasks processed! Printing stats.")
else:
self.log.info("Test timeout reached. Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
sys.exit()
def clear_dag_runs():
"""
Remove any existing DAG runs for the perf test DAGs.
"""
session = settings.Session()
drs = (
session.query(DagRun)
.filter(
DagRun.dag_id.in_(DAG_IDS),
)
.all()
)
for dr in drs:
logging.info('Deleting DagRun :: %s', dr)
session.delete(dr)
def clear_dag_task_instances():
"""
Remove any existing task instances for the perf test DAGs.
"""
session = settings.Session()
TI = TaskInstance
tis = session.query(TI).filter(TI.dag_id.in_(DAG_IDS)).all()
for ti in tis:
logging.info('Deleting TaskInstance :: %s', ti)
session.delete(ti)
session.commit()
def set_dags_paused_state(is_paused):
"""
Toggle the pause state of the DAGs in the test.
"""
session = settings.Session()
dag_models = session.query(DagModel).filter(DagModel.dag_id.in_(DAG_IDS))
for dag_model in dag_models:
logging.info('Setting DAG :: %s is_paused=%s', dag_model, is_paused)
dag_model.is_paused = is_paused
session.commit()
def main():
"""
Run the scheduler metrics jobs after loading the test configuration and
clearing old instances of dags and tasks
"""
max_runtime_secs = MAX_RUNTIME_SECS
if len(sys.argv) > 1:
try:
max_runtime_secs = int(sys.argv[1])
if max_runtime_secs < 1:
raise ValueError
except ValueError:
logging.error('Specify a positive integer for timeout.')
sys.exit(1)
conf.load_test_config()
set_dags_paused_state(False)
clear_dag_runs()
clear_dag_task_instances()
job = SchedulerMetricsJob(dag_ids=DAG_IDS, subdir=SUBDIR, max_runtime_secs=max_runtime_secs)
job.run()
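# --- Illustrative sketch (not part of the test harness) ----------------------
# Hedged example of how the four per-task-instance metrics documented in
# SchedulerMetricsJob map onto timestamps; all values are invented, and
# `executor_start` plays the role of self.start_date in print_stats().
def _metrics_sketch():
    executor_start = pd.Timestamp('2021-01-01 00:00:00')
    queued_dttm = pd.Timestamp('2021-01-01 00:00:02')
    start_date = pd.Timestamp('2021-01-01 00:00:05')
    end_date = pd.Timestamp('2021-01-01 00:00:09')
    return {
        'queue_delay': (queued_dttm - executor_start).total_seconds(),  # 2.0
        'start_delay': (start_date - executor_start).total_seconds(),   # 5.0
        'land_time': (end_date - executor_start).total_seconds(),       # 9.0
        'duration': (end_date - start_date).total_seconds(),            # 4.0
    }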
if __name__ == "__main__":
main()
| apache-2.0 |
aufziehvogel/kaggle | visualization.py | 1 | 1343 | import itertools, operator
from matplotlib import pyplot
import pylab
from mpl_toolkits.mplot3d import Axes3D
from sklearn import cross_validation
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import math
def plot_3d(X, y):
data = zip(y, X)
data = sorted(data, key=lambda x: x[0])
it = itertools.groupby(data, operator.itemgetter(0))
data = [(key, [item[1] for item in subiter]) for key, subiter in it]
fig = pylab.figure()
ax = Axes3D(fig)
proxies = []
labels = []
colors = [np.random.rand(3,1) for i in range(10)]
for label, points in data:
points = np.array(points)
ax.scatter(points[:,0], points[:,1], points[:,2], c=colors[label])
proxies.append(pyplot.Rectangle((0, 0), 1, 1, fc=colors[label]))
labels.append(label)
ax.legend(proxies, labels)
pyplot.show()
def eval_knn(X, y, start, end):
start_l = int(math.log(start))
end_l = int(math.log(end))
plot_x = []
plot_y = []
    for n in (int(math.exp(x)) for x in range(start_l, end_l)):  # k must be an int for KNeighborsClassifier
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y)
knn = KNeighborsClassifier(n_neighbors=n).fit(X_train, y_train)
plot_x.append(n)
plot_y.append(knn.score(X_test, y_test))
pyplot.plot(plot_x, plot_y)
pyplot.show()
| mit |
GenosResearchGroup/ContourMetrics | lib/analysis.py | 1 | 9021 | import glob
import itertools
import os
import django
import music21
import numpy
import pandas
import scipy
import lib
from lib import Contour, Comparison
class AlgorithmsComparison(object):
def __init__(self, contour_query_set, algorithms, confidence_level=95, confidence_interval=5):
self.contour_query_set = contour_query_set
self.algorithms = algorithms
self.confidence_level = confidence_level
self.confidence_interval = confidence_interval
self.population_size = contour_query_set.count()
self.sample_size = lib.utils.sample_size(self.population_size, confidence_level, confidence_interval)
self.contours_sample = None
self.similarity_measures = []
self.a = []
def __repr__(self):
return '<AlgorithmComparison: {} {}>'.format(self.population_size, self.sample_size)
def get_contours_sample(self):
self.contours_sample = self.contour_query_set.order_by('?')[:self.sample_size]
def calculate_similarity(self):
if type(self.contours_sample) != django.db.models.query.QuerySet:
self.get_contours_sample()
contours_map = map(lambda c: c['normalized'], self.contours_sample.values('normalized'))
seq = []
self.a = []
for a, b in itertools.combinations(contours_map, 2):
ca = Contour(a)
cb = Contour(b)
comp = Comparison(ca, cb)
seq.append([comp.compare(algorithm) for algorithm in self.algorithms])
self.a.append(comp)
self.similarity_measures = lib.utils.ExtendedDataFrame(seq, columns=self.algorithms)
def ks_test(self):
if type(self.contours_sample) != django.db.models.query.QuerySet:
self.calculate_similarity()
ind = []
seq = []
for a, b in itertools.combinations(self.algorithms, 2):
edf = lib.utils.ExtendedDataFrame(self.similarity_measures[[a, b]])
seq.append(edf.ks_test())
ind.append('{} {}'.format(a, b))
return pandas.DataFrame(seq, index=ind)
def entropy(self):
if type(self.contours_sample) != django.db.models.query.QuerySet:
self.calculate_similarity()
return pandas.Series(scipy.stats.entropy(self.similarity_measures), index=self.algorithms)
class GeneralComparison(object):
def __init__(self, contours):
if not (isinstance(contours, list) and
all(map(lambda c: isinstance(c, Contour) and
all(map(lambda el: isinstance(el, (int, float)), c.sequence)), contours))):
raise AttributeError('The class attribute must be a list of Contour objects')
self.contours = contours
self.size = len(contours)
self.similarity = None
self.algorithm = None
self.pretty = [c.__repr__() for c in contours]
def __repr__(self):
return '<GeneralComparison: {} contours>'.format(self.size)
def similarity_map(self, algorithm='AGP'):
m = numpy.zeros(self.size ** 2).reshape(self.size, self.size)
for i in range(self.size):
c1 = self.contours[i]
for j in range(i, self.size):
c2 = self.contours[j]
comp = Comparison(c1, c2)
value = comp.compare(algorithm)
m[i][j] = value
m[j][i] = value
df = lib.utils.ExtendedDataFrame(m, columns=self.pretty, index=self.pretty)
self.similarity = df
self.algorithm = algorithm
return df
def similarity_series(self, algorithm='AGP'):
if self.algorithm != algorithm:
self.similarity = self.similarity_map(algorithm)
sim = numpy.array(self.similarity)
size = len(sim)
seq = []
for i in range(size):
for j in range(i + 1, size):
seq.append(sim[i][j])
return pandas.Series(seq)
def get_contour_method(self, method, argument=None):
seq = []
for c in self.contours:
if argument:
seq.append(getattr(c, method)(argument))
else:
seq.append(getattr(c, method)())
return seq
def features_table(self):
columns = ['Direction', 'Oscillation', 'Diversity']
seq = []
for c in self.contours:
seq.append([
c.direction_index(),
c.oscillation_index(),
c.points_diversity_index()
])
return pandas.DataFrame(seq, columns=columns, index=self.pretty)
class CollectionComparison(object):
def __init__(self, filenames):
self.filenames = glob.glob(filenames)
self.basenames = []
self.collection_pieces = []
self.contours = []
self.similarity = None
self.algorithm = None
def __repr__(self):
return '<CollectionComparison: {} files>'.format(len(self.filenames))
def get_info(self):
for f in self.filenames:
cp = CollectionPiece(f)
cp.get_info()
self.collection_pieces.append(cp)
self.contours.append(cp.contour)
self.basenames.append(os.path.basename(f))
def similarity_map(self, algorithm='AGP'):
if not self.contours:
self.get_info()
size = len(self.filenames)
m = numpy.zeros(size ** 2).reshape(size, size)
for i in range(size):
c1 = self.contours[i]
for j in range(i, size):
c2 = self.contours[j]
comp = Comparison(c1, c2)
value = comp.compare(algorithm)
m[i][j] = value
m[j][i] = value
df = lib.utils.ExtendedDataFrame(m, columns=self.basenames, index=self.basenames)
self.similarity = df
self.algorithm = algorithm
return df
def similarity_series(self, algorithm='AGP'):
if self.algorithm != algorithm:
self.similarity = self.similarity_map(algorithm)
sim = numpy.array(self.similarity)
size = len(sim)
seq = []
for i in range(size):
for j in range(i + 1, size):
seq.append(sim[i][j])
return pandas.Series(seq)
def get_contour_method(self, method, argument=None):
if not self.contours:
self.get_info()
seq = []
for c in self.contours:
if argument:
seq.append(getattr(c, method)(argument))
else:
seq.append(getattr(c, method)())
return seq
def features_table(self):
if not self.contours:
self.get_info()
columns = ['Direction', 'Oscillation', 'Diversity']
seq = []
for c in self.contours:
seq.append([
c.direction_index(),
c.oscillation_index(),
c.points_diversity_index()
])
return pandas.DataFrame(seq, columns=columns, index=self.basenames)
# FIXME: add comparison methods
class CollectionPiece(object):
def __init__(self, filename):
self.score = None
self.filename = filename
self.midi = []
self.pitches = []
self.durations = []
self.contour = None
self.mode = None
self.key = None
self.time_signature = None
def __repr__(self):
return '<CollectionPiece: {}>'.format(os.path.basename(self.filename))
def get_info(self):
if not self.score:
self.score = music21.converter.parse(self.filename)
part = self.score.parts[0]
measures = part.getElementsByClass('Measure')
if measures:
first_measure = part.getElementsByClass('Measure')[0]
key_obj = first_measure.getElementsByClass('Key')
time_obj = first_measure.getElementsByClass('TimeSignature')
else:
key_obj = part.getElementsByClass('Key')
time_obj = part.getElementsByClass('TimeSignature')
if key_obj:
key, mode = key_obj[0].name.split(' ')
if mode == 'major':
self.mode = 'M'
else:
self.mode = 'm'
self.key = key
if time_obj:
time_obj = time_obj[0]
self.time_signature = '{}/{}'.format(time_obj.numerator, time_obj.denominator)
for el in part.flat.notesAndRests:
if el.isNote:
self.pitches.append(el.pitch.nameWithOctave)
self.midi.append(el.pitch.midi)
elif el.isChord:
self.pitches.append(el.pitches[-1].nameWithOctave)
self.midi.append(el.pitches[-1].midi)
elif el.isRest:
self.pitches.append('R')
self.durations.append(el.duration.quarterLength)
if self.midi:
self.contour = Contour(self.midi).remove_adjacent_repeats().normalize()
else:
self.contour = Contour([])
| mit |
chermes/python-ukr | examples/rotatingMonkey/monkey.py | 1 | 2472 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Generates, interpolates and visualizes the "monkey head" manifold. The head
is the Blender mascot Suzanne rotating around each three-dimensional axis.
Author: Christoph Hermes
Created on Februar 21, 2015 19:38:28
The MIT License (MIT)
Copyright (c) 2015 Christoph Hermes
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# make the UKR module visible to Python
import os, sys
lib_path = os.path.abspath(os.path.join('..', '..', 'src_naive'))
sys.path.append(lib_path)
import glob
import numpy as np
import scipy.ndimage as ndi
import sklearn.manifold
import sklearn.decomposition
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import ukr
# load the monkey head images into memory
D = sorted(glob.glob('images_rotatingMonkey/*.png'))
X_raw = np.zeros((len(D), np.prod(ndi.imread(D[0]).shape)))
for imgI, img in enumerate(D):
X_raw[imgI] = ndi.imread(img).astype(np.float64).flatten()
N = X_raw.shape[0]
initEmbed = sklearn.decomposition.PCA(3)
model = ukr.UKR(n_components=3, kernel=ukr.student_k(2), n_iter=1000, embeddings=[initEmbed], metric=2, enforceCycle=True)
mani = model.fit_transform(X_raw)
f = plt.figure()
f.clf()
ax = Axes3D(f)
ax.plot3D(mani[:N//3,0], mani[:N//3,1], mani[:N//3,2], '.-', label='pitch')
ax.plot3D(mani[N//3:N*2//3,0], mani[N//3:N*2//3,1], mani[N//3:N*2//3,2], '.-', label='yaw')
ax.plot3D(mani[N*2//3:,0], mani[N*2//3:,1], mani[N*2//3:,2], '.-', label='roll')
plt.legend()
plt.show()
| mit |
chenyyx/scikit-learn-doc-zh | examples/zh/decomposition/plot_ica_blind_source_separation.py | 52 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
# #############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
# #############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
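# #############################################################################
# Illustrative sketch (not part of the original example): a hedged sanity
# check that each true source correlates, in absolute value (ICA recovers
# sources only up to sign/permutation/scale), almost perfectly with one ICA
# component, while the PCA components mix the sources.
def best_match_correlations(true_sources, recovered):
    # rows 0-2 of corrcoef: true sources; columns of the slice: recovered ones
    corr = np.abs(np.corrcoef(true_sources.T, recovered.T))[:3, 3:]
    return corr.max(axis=1)
# e.g. best_match_correlations(S, S_) -> values close to 1
# best_match_correlations(S, H) -> noticeably smaller values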
| gpl-3.0 |
ryfeus/lambda-packs | Pandas_numpy/source/pandas/core/categorical.py | 1 | 80777 | # pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import types
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_platform_int,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar,
is_dict_like)
from pandas.core.common import is_null_slice, _maybe_box_datetimelike
from pandas.core.algorithms import factorize, take_1d, unique1d
from pandas.core.accessor import PandasDelegate
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg
from pandas.core.config import get_option
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
        # results depending on whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of values.
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
If not given, the resulting categorical will not be ordered.
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype()
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
ordered = dtype.ordered
elif is_categorical(values):
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = dtype
return
# sanitize input
if is_categorical_dtype(values):
# we are either a Series or a CategoricalIndex
if isinstance(values, (ABCSeries, ABCCategoricalIndex)):
values = values._values
if ordered is None:
ordered = values.ordered
if categories is None:
categories = values.categories
values = values.get_values()
elif isinstance(values, (ABCIndexClass, ABCSeries)):
# we'll do inference later
pass
else:
# on numpy < 1.6 datetimelike get inferred to all i8 by
# _sanitize_array which is fine, but since factorize does this
            # correctly there is no need here. This is an issue because _sanitize_array
# also coerces np.nan to a string under certain versions of numpy
# as well
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# On list with NaNs, int values will be converted to float. Use
# "object" dtype to prevent this. In the end objects will be
            # cast to int/... in the category assignment step.
if len(values) == 0 or isna(values).any():
sanitize_dtype = 'object'
else:
sanitize_dtype = None
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
if dtype.categories is None:
# we're inferring from values
dtype = CategoricalDtype(categories, ordered)
else:
# there were two ways if categories are present
# - the old one, where each value is a int pointer to the levels
# array -> not anymore possible, but code outside of pandas could
# call us like that, so make some checks
# - the new one, where each value is also in the categories array
# (or np.nan)
codes = _get_codes_for_values(values, dtype.categories)
            # TODO: check for old style usage. These warnings should be removed
# after 0.18/ in 2016
if (is_integer_dtype(values) and
not is_integer_dtype(dtype.categories)):
warn("Values and categories have different dtypes. Did you "
"mean to use\n'Categorical.from_codes(codes, "
"categories)'?", RuntimeWarning, stacklevel=2)
if (len(values) and is_integer_dtype(values) and
(codes == -1).all()):
warn("None of the categories were found in values. Did you "
"mean to use\n'Categorical.from_codes(codes, "
"categories)'?", RuntimeWarning, stacklevel=2)
self._dtype = dtype
self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
            number of new categories is unequal to the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :ref:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _constructor(self):
return Categorical
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
categories=self.categories,
ordered=self.ordered,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
if copy is True:
return self.copy()
return self
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
if is_datetimelike(self.categories):
return [_maybe_box_datetimelike(x) for x in self]
return np.array(self).tolist()
def reshape(self, new_shape, *args, **kwargs):
"""
.. deprecated:: 0.19.0
Calling this method will raise an error in a future release.
An ndarray-compatible method that returns `self` because
`Categorical` instances cannot actually be reshaped.
Parameters
----------
new_shape : int or tuple of ints
A 1-D array of integers that correspond to the new
shape of the `Categorical`. For more information on
the parameter, please refer to `np.reshape`.
"""
warn("reshape is deprecated and will raise "
"in a subsequent release", FutureWarning, stacklevel=2)
nv.validate_reshape(args, kwargs)
# while the 'new_shape' parameter has no effect,
# we should still enforce valid shape parameters
np.reshape(self.codes, new_shape)
return self
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
            # recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_array(cls, data, **kwargs):
"""
.. deprecated:: 0.19.0
Use ``Categorical`` instead.
Make a Categorical type from a single array-like object.
For internal compatibility with numpy arrays.
Parameters
----------
data : array-like
Can be an Index or array-like. The categories are assumed to be
the unique values of `data`.
"""
warn("Categorical.from_array is deprecated, use Categorical instead",
FutureWarning, stacklevel=2)
return cls(data, **kwargs)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
        so do not need the (computationally intensive) factorization step, which is
        usually done in the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
            Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
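        Examples
        --------
        A small usage sketch; the repr shown is approximately what this
        version of pandas prints:
        >>> pd.Categorical.from_codes([0, 1, 0, -1], ['a', 'b'], ordered=True)
        [a, b, a, NaN]
        Categories (2, object): [a < b]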
"""
try:
codes = np.asarray(codes, np.int64)
except:
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype._validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _get_labels(self):
"""
Get the category labels (deprecated).
Deprecated, use .codes!
"""
warn("'labels' is deprecated. Use 'codes' instead", FutureWarning,
stacklevel=2)
return self.codes
labels = property(fget=_get_labels, fset=_set_codes)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _codes_for_groupby(self, sort):
"""
If sort=False, return a copy of self, coded with categories as
returned by .unique(), followed by any categories not appearing in
the data. If sort=True, return self.
This method is needed solely to ensure the categorical index of the
GroupBy result has categories in the order of appearance in the data
(GH-8868).
Parameters
----------
sort : boolean
            The value of the sort parameter groupby was called with.
Returns
-------
Categorical
If sort=False, the new categories are set to the order of
appearance in codes (unless ordered=True, in which case the
original order is preserved), followed by any unrepresented
categories in the original order.
"""
# Already sorted according to self.categories; all is fine
if sort:
return self
# sort=False should order groups in as-encountered order (GH-8868)
cat = self.unique()
# But for groupby to work, all categories should be present,
# including those missing from the data (GH-13179), which .unique()
# above dropped
cat.add_categories(
self.categories[~self.categories.isin(cat.categories)],
inplace=True)
return self.reorder_categories(cat.categories)
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
        (fewer or more items than in the old categories will result in values
        set to NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand this method does not do checks (e.g., whether the
        old categories are included in the new categories on a reorder), which
        can result in surprising changes, for example when using special string
        dtypes on python3, which do not consider an S1 string equal to a
        single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
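        Examples
        --------
        A minimal illustration (values are kept, categories are replaced;
        output approximate):
        >>> c = pd.Categorical(['a', 'b', 'a'])
        >>> c.set_categories(['b', 'a', 'c'])
        [a, b, a]
        Categories (3, object): [b, a, c]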
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like or dict-like
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored. *New in version 0.21.0*.
.. warning::
               Currently, Series are considered list-like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
            Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
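        Examples
        --------
        A minimal illustration (output approximate):
        >>> c = pd.Categorical(['a', 'b'])
        >>> c.add_categories(['c'])
        [a, b]
        Categories (3, object): [a, b, c]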
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
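# Illustrative usage sketch (assumed data): the new category is appended at
# the end and starts out unused.
#   >>> c = pd.Categorical(['a', 'b'])
#   >>> c.add_categories(['c'])
#   [a, b]
#   Categories (3, object): [a, b, c]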
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
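# Illustrative usage sketch (assumed data): values belonging to a removed
# category become NaN rather than raising.
#   >>> c = pd.Categorical(['a', 'b', 'c'])
#   >>> c.remove_categories(['b'])
#   [a, NaN, c]
#   Categories (2, object): [a, c]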
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
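# Illustrative usage sketch (assumed data): categories with no occurrences are
# dropped from the dtype, the values are untouched.
#   >>> c = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
#   >>> c.remove_unused_categories()
#   [a, b]
#   Categories (2, object): [a, b]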
def map(self, mapper):
"""Apply mapper function to its categories (not codes).
Parameters
----------
mapper : callable
Function to be applied. When all categories are mapped
to different categories, the result will be Categorical which has
the same order property as the original. Otherwise, the result will
be np.ndarray.
Returns
-------
applied : Categorical or Index.
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
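# Illustrative usage sketch (assumed data): when the mapper keeps the
# categories distinct the result stays a Categorical; if mapped categories
# collide, the ValueError branch above falls back to a plain array.
#   >>> c = pd.Categorical(['a', 'b', 'c'])
#   >>> c.map(lambda x: x.upper())
#   [A, B, C]
#   Categories (3, object): [A, B, C]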
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, _ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
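# Illustrative usage sketch (assumed data): vacated positions are filled with
# the -1 code, i.e. NaN.
#   >>> c = pd.Categorical(['a', 'b', 'c'])
#   >>> c.shift(1)
#   [NaN, a, b]
#   Categories (3, object): [a, b, c]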
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype._validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='v', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
def isna(self):
"""
Detect missing values
Both missing values (-1 in .codes) and NA as a category are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
# String/object and float categories can hold np.nan
if self.categories.dtype.kind in ['S', 'O', 'f']:
if np.nan in self.categories:
nan_pos = np.where(isna(self.categories))[0]
# we only have one NA in categories
ret = np.logical_or(ret, self._codes == nan_pos)
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Both missing values (-1 in .codes) and NA as a category are detected.
NA is removed from the categories if present.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
if isna(result.categories).any():
result = result.remove_categories([np.nan])
return result
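# Illustrative usage sketch (assumed data):
#   >>> pd.Categorical(['a', np.nan, 'b']).dropna()
#   [a, b]
#   Categories (2, object): [a, b]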
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN, even if NaN is a category.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import isna, Series, CategoricalIndex
obj = (self.remove_categories([np.nan]) if dropna and
isna(self.categories).any() else self)
code, cat = obj._codes, obj.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
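# Illustrative usage sketch (assumed data): note that unused categories still
# get a (zero) entry.
#   >>> c = pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c'])
#   >>> c.value_counts()
#   a    2
#   b    1
#   c    0
#   dtype: int64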
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):
"""
Returns the indices that would sort the Categorical instance if
'sort_values' was called. This function is implemented to provide
compatibility with numpy ndarray objects.
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
"""
ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
result = np.argsort(self._codes.copy(), kind=kind, **kwargs)
if not ascending:
result = result[::-1]
return result
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank())
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
value : scalar
Value to use to fill holes (e.g. 0)
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
values = self._codes
# Make sure that we also get NA in categories
if self.categories.dtype.kind in ['S', 'O', 'f']:
if np.nan in self.categories:
values = values.copy()
nan_pos = np.where(isna(self.categories))[0]
# we only have one NA in categories
values[values == nan_pos] = -1
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
values = _get_codes_for_values(values, self.categories)
else:
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = values == -1
if mask.any():
values = values.copy()
if isna(value):
values[mask] = -1
else:
values[mask] = self.categories.get_loc(value)
return self._constructor(values, categories=self.categories,
ordered=self.ordered, fastpath=True)
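# Illustrative usage sketch (assumed data): the fill value has to be an
# existing category, otherwise a ValueError is raised.
#   >>> c = pd.Categorical(['a', np.nan, 'b'])
#   >>> c.fillna('a')
#   [a, a, b]
#   Categories (2, object): [a, b]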
def take_nd(self, indexer, allow_fill=True, fill_value=None):
""" Take the codes by the indexer, fill with the fill_value.
For internal compatibility with numpy arrays.
"""
# filling must always be None/nan here
# but is passed thru internally
assert isna(fill_value)
codes = take_1d(self._codes, indexer, allow_fill=True, fill_value=-1)
result = self._constructor(codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
_codes = self._codes[slicer]
return self._constructor(values=_codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values())
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if com.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# collapse " < ... < " to " ... " to save space in the footer
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
categories=self.categories,
ordered=self.ordered, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if len(key) == 2:
if not is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as an
# indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
# FIXME: the following can be removed after GH7820 is fixed:
# https://github.com/pandas-dev/pandas/issues/7820
# float categories do currently return -1 for np.nan, even if np.nan is
# included in the index -> "repair" this here
if isna(rvalue).any() and isna(self.categories).any():
nan_pos = np.where(isna(self.categories))[0]
lindexer[lindexer == -1] = nan_pos
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation """
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(numeric_only=numeric_only, **kwds)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
good = self._codes != -1
values = sorted(htable.mode_int64(_ensure_int64(self._codes[good])))
result = self._constructor(values=values, categories=self.categories,
ordered=self.ordered, fastpath=True)
return result
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc')).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
...                categories=list('abc'),
...                ordered=True).unique()
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = sorted(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
return (self.is_dtype_equal(other) and
np.array_equal(self._codes, other._codes))
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
# The Series.cat accessor
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data by default (but can be called with
`inplace=True`).
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, values, index, name):
self.categorical = values
self.index = index
self.name = name
self._freeze()
def _delegate_property_get(self, name):
return getattr(self.categorical, name)
def _delegate_property_set(self, name, new_values):
return setattr(self.categorical, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self.categorical.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self.categorical, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
@classmethod
def _make_accessor(cls, data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
return CategoricalAccessor(data.values, data.index,
getattr(data, 'name', None),)
CategoricalAccessor._add_delegate_accessors(delegate=Categorical,
accessors=["categories",
"ordered"],
typ='property')
CategoricalAccessor._add_delegate_accessors(delegate=Categorical, accessors=[
"rename_categories", "reorder_categories", "add_categories",
"remove_categories", "remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"], typ='method')
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if not is_dtype_equal(values.dtype, categories.dtype):
values = _ensure_object(values)
categories = _ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
isinstance(list_like, types.GeneratorType)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
cat = Categorical(values, ordered=True)
categories = cat.categories
codes = cat.codes
return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
| mit |
jalonsob/Informes | vizGrimoireJS/generic_report.py | 3 | 18311 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
## Copyright (C) 2014 Bitergia
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##
##
## Authors:
## Daniel Izquierdo-Cortazar <[email protected]>
##
## This script aims at providing an easy way to obtain some figures and json/csv files
## for a set of basic metrics per data source.
##
## python openstack_report.py -a dic_cvsanaly_openstack_2259 -d dic_bicho_openstack_gerrit_3392_bis -i dic_cvsanaly_openstack_2259 -r 2013-07-01,2013-10-01,2014-01-01,2014-04-01,2014-07-01 -c lcanas_bicho_openstack_1376 -b lcanas_mlstats_openstack_1376 -f dic_sibyl_openstack_3194_new -e dic_irc_openstack_3277
import imp, inspect
from optparse import OptionParser
from os import listdir, path, environ
from os.path import isfile, join
import sys
import locale
import matplotlib as mpl
# This avoids the use of the $DISPLAY value for the charts
mpl.use('Agg')
import matplotlib.pyplot as plt
import prettyplotlib as ppl
from prettyplotlib import brewer2mpl
import numpy as np
from datetime import datetime
def bar_chart(title, labels, data1, file_name, data2 = None, legend=["", ""]):
colors = ["orange", "grey"]
fig, ax = plt.subplots(1)
xpos = np.arange(len(data1))
width = 0.35
plt.title(title)
y_pos = np.arange(len(data1))
if data2 is not None:
ppl.bar(xpos+width, data1, color="orange", width=0.35, annotate=True)
ppl.bar(xpos, data2, grid='y', width = 0.35, annotate=True)
plt.xticks(xpos+width, labels)
plt.legend(legend)
else:
ppl.bar(xpos, data1, grid='y', annotate=True)
plt.xticks(xpos+width, labels)
plt.savefig(file_name + ".eps")
plt.close()
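# Illustrative call (hypothetical data and file name, not taken from any real
# report): this would write "commits_per_release.eps" in the working
# directory, with the second series drawn beside the first.
#   bar_chart("Commits and authors per release", ["2013-S1", "2013-S2"],
#             [120, 150], "commits_per_release",
#             data2=[30, 45], legend=["commits", "authors"])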
def ts_chart(title, unixtime_dates, data, file_name):
fig = plt.figure()
plt.title(title)
dates = []
for unixdate in unixtime_dates:
dates.append(datetime.fromtimestamp(float(unixdate)))
ppl.plot(dates, data)
fig.autofmt_xdate()
fig.savefig(file_name + ".eps")
def barh_chart(title, yvalues, xvalues, file_name):
fig, ax = plt.subplots(1)
x_pos = np.arange(len(xvalues))
plt.title(title)
y_pos = np.arange(len(yvalues))
#plt.barh(y_pos, xvalues)
ppl.barh(y_pos, xvalues, grid='x')
plt.yticks(y_pos, yvalues)
plt.savefig(file_name + ".eps")
plt.close()
def read_options():
parser = OptionParser(usage="usage: %prog [options]",
version="%prog 0.1")
parser.add_option("-a", "--dbcvsanaly",
action="store",
dest="dbcvsanaly",
help="CVSAnalY db where information is stored")
parser.add_option("-b", "--dbmlstats",
action="store",
dest="dbmlstats",
help="Mailing List Stats db where information is stored")
parser.add_option("-c", "--dbbicho",
action="store",
dest="dbbicho",
help="Bicho db where information is stored")
parser.add_option("-d", "--dbreview",
action="store",
dest="dbreview",
help="Review db where information is stored")
parser.add_option("-e", "--dbirc",
action="store",
dest="dbirc",
help="IRC where information is stored")
parser.add_option("-f", "--dbqaforums",
action="store",
dest="dbqaforums",
help="QAForums where information is stored")
parser.add_option("-i", "--identities",
action="store",
dest="dbidentities",
help="Database with unique identities and affiliations")
parser.add_option("-u","--dbuser",
action="store",
dest="dbuser",
default="root",
help="Database user")
parser.add_option("-p","--dbpassword",
action="store",
dest="dbpassword",
default="",
help="Database password")
parser.add_option("-r", "--releases",
action="store",
dest="releases",
default="2010-01-01,2011-01-01,2012-01-01",
help="Releases for the report")
parser.add_option("-t", "--type",
action="store",
dest="backend",
default="bugzilla",
help="Type of backend: bugzilla, allura, jira, github")
parser.add_option("-g", "--granularity",
action="store",
dest="granularity",
default="months",
help="year,months,weeks granularity")
parser.add_option("--npeople",
action="store",
dest="npeople",
default="10",
help="Limit for people analysis")
parser.add_option("--dir",
action="store",
dest="output_dir",
default="./report/")
# TBD
#parser.add_option("--list-metrics",
# help="List available metrics")
(opts, args) = parser.parse_args()
return opts
def build_releases(releases_dates):
# Builds a list of tuples of dates that limit
# each of the timeperiods to analyze
releases = []
dates = releases_dates.split(",")
init = dates[0]
for date in dates:
if init != date:
releases.append((init, date))
init = date
return releases
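# Illustrative example (assumed dates): consecutive dates are paired into
# analysis periods.
#   build_releases("2013-01-01,2013-07-01,2014-01-01")
#   -> [('2013-01-01', '2013-07-01'), ('2013-07-01', '2014-01-01')]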
def scm_report(dbcon, filters, output_dir):
# Basic activity and community metrics in source code
# management systems
dataset = {}
from vizgrimoire.analysis.onion_model import CommunityStructure
onion = CommunityStructure(dbcon, filters)
result = onion.result()
dataset["scm_core"] = result["core"]
dataset["scm_regular"] = result["regular"]
dataset["scm_occasional"] = result["occasional"]
authors_period = scm.AuthorsPeriod(dbcon, filters)
dataset["scm_authorsperiod"] = float(authors_period.get_agg()["avg_authors_month"])
authors = scm.Authors(dbcon, filters)
top_authors = authors.get_list()
createJSON(top_authors, output_dir + "scm_top_authors.json")
createCSV(top_authors, output_dir + "scm_top_authors.csv")
commits = scm.Commits(dbcon, filters)
dataset["scm_commits"] = commits.get_agg()["commits"]
authors = scm.Authors(dbcon, filters)
dataset["scm_authors"] = authors.get_agg()["authors"]
#companies = scm.Companies(dbcon, filters)
#top_companies = companies.get_list(filters)
#createJSON()
#createCSV()
return dataset
def its_report(dbcon, filters):
# basic metrics for ticketing systems
dataset = {}
from vizgrimoire.ITS import ITS
ITS.set_backend("launchpad")
opened = its.Opened(dbcon, filters)
dataset["its_opened"] = opened.get_agg()["opened"]
closed = its.Closed(dbcon, filters)
dataset["its_closed"] = closed.get_agg()["closed"]
return dataset
def scr_report(dbcon, filters):
# review system basic set of metrics
dataset = {}
submitted = scr.Submitted(dbcon, filters)
dataset["scr_submitted"] = submitted.get_agg()["submitted"]
merged = scr.Merged(dbcon, filters)
dataset["scr_merged"] = merged.get_agg()["merged"]
abandoned = scr.Abandoned(dbcon, filters)
dataset["scr_abandoned"] = abandoned.get_agg()["abandoned"]
waiting4reviewer = scr.ReviewsWaitingForReviewer(dbcon, filters)
dataset["scr_waiting4reviewer"] = waiting4reviewer.get_agg()["ReviewsWaitingForReviewer"]
waiting4submitter = scr.ReviewsWaitingForSubmitter(dbcon, filters)
dataset["scr_waiting4submitter"] = waiting4submitter.get_agg()["ReviewsWaitingForSubmitter"]
filters.period = "month"
time2review = scr.TimeToReview(dbcon, filters)
dataset["scr_review_time_days_median"] = round(time2review.get_agg()["review_time_days_median"], 2)
dataset["scr_review_time_days_avg"] = round(time2review.get_agg()["review_time_days_avg"], 2)
return dataset
def serialize_threads(threads, crowded, threads_object):
l_threads = {}
if crowded:
l_threads['people'] = []
else:
l_threads['len'] = []
l_threads['subject'] = []
l_threads['date'] = []
l_threads['initiator'] = []
for email_people in threads:
if crowded:
email = email_people[0]
else:
email = email_people
if crowded:
l_threads['people'].append(email_people[1])
else:
l_threads['len'].append(threads_object.lenThread(email.message_id))
subject = email.subject.replace(",", " ")
subject = subject.replace("\n", " ")
l_threads['subject'].append(subject)
l_threads['date'].append(email.date.strftime("%Y-%m-%d"))
l_threads['initiator'].append(email.initiator_name.replace(",", " "))
return l_threads
def mls_report(dbcon, filters, output_dir):
dataset = {}
emails = mls.EmailsSent(dbcon, filters)
dataset["mls_sent"] = emails.get_agg()["sent"]
senders = mls.EmailsSenders(dbcon, filters)
dataset["mls_senders"] = senders.get_agg()["senders"]
senders_init = mls.SendersInit(dbcon, filters)
dataset["mls_senders_init"] = senders_init.get_agg()["senders_init"]
from vizgrimoire.analysis.threads import Threads
SetDBChannel(dbcon.user, dbcon.password, dbcon.database)
threads = Threads(filters.startdate, filters.enddate, dbcon.identities_db)
top_longest_threads = threads.topLongestThread(10)
top_longest_threads = serialize_threads(top_longest_threads, False, threads)
createJSON(top_longest_threads, output_dir + "/mls_top_longest_threads.json")
createCSV(top_longest_threads, output_dir + "/mls_top_longest_threads.csv")
top_crowded_threads = threads.topCrowdedThread(10)
top_crowded_threads = serialize_threads(top_crowded_threads, True, threads)
createJSON(top_crowded_threads, output_dir + "/mls_top_crowded_threads.json")
createCSV(top_crowded_threads, output_dir + "/mls_top_crowded_threads.csv")
return dataset
def parse_urls(urls):
qs_aux = []
for url in urls:
url = url.replace("https://ask.openstack.org/en/question/", "")
url = url.replace("_", "\_")
qs_aux.append(url)
return qs_aux
def qaforums_report(dbcon, filters, output_dir):
# basic metrics for qaforums
dataset = {}
questions = qa.Questions(dbcon, filters)
dataset["qa_questions"] = questions.get_agg()["qsent"]
answers = qa.Answers(dbcon, filters)
dataset["qa_answers"] = answers.get_agg()["asent"]
comments = qa.Comments(dbcon, filters)
dataset["qa_comments"] = comments.get_agg()["csent"]
q_senders = qa.QuestionSenders(dbcon, filters)
dataset["qa_qsenders"] = q_senders.get_agg()["qsenders"]
import vizgrimoire.analysis.top_questions_qaforums as top
tops = top.TopQuestions(dbcon, filters)
commented = tops.top_commented()
commented["qid"] = commented.pop("question_identifier")
# Taking the last part of the URL
commented["site"] = parse_urls(commented.pop("url"))
createJSON(commented, output_dir + "/qa_top_questions_commented.json")
createCSV(commented, output_dir + "/qa_top_questions_commented.csv")
visited = tops.top_visited()
visited["qid"] = visited.pop("question_identifier")
visited["site"] = parse_urls(visited.pop("url"))
createJSON(visited, output_dir + "/qa_top_questions_visited.json")
createCSV(visited, output_dir + "/qa_top_questions_visited.csv")
crowded = tops.top_crowded()
crowded["qid"] = crowded.pop("question_identifier")
crowded["site"] = parse_urls(crowded.pop("url"))
createJSON(crowded, output_dir + "/qa_top_questions_crowded.json")
createCSV(crowded, output_dir + "./qa_top_questions_crowded.csv")
filters.npeople = 15
createJSON(tops.top_tags(), output_dir + "/qa_top_tags.json")
createCSV(tops.top_tags(), output_dir + "/qa_top_tags.csv")
return dataset
def irc_report(dbcon, filters, output_dir):
# irc basic report
dataset = {}
sent = irc.Sent(dbcon, filters)
dataset["irc_sent"] = sent.get_agg()["sent"]
senders = irc.Senders(dbcon, filters)
dataset["irc_senders"] = senders.get_agg()["senders"]
top_senders = senders.get_list()
createJSON(top_senders, output_dir + "/irc_top_senders.json")
createCSV(top_senders, output_dir + "/irc_top_senders.csv")
return dataset
# Until we use VizPy we will create JSON python files with _py
def createCSV(data, filepath, skip_fields = []):
fd = open(filepath, "w")
keys = list(set(data.keys()) - set(skip_fields))
header = u''
for k in keys:
header += unicode(k)
header += u','
header = header[:-1]
body = ''
length = len(data[keys[0]]) # the length should be the same for all
cont = 0
while (cont < length):
for k in keys:
try:
body += unicode(data[k][cont])
except UnicodeDecodeError:
body += u'ERROR'
body += u','
body = body[:-1]
body += u'\n'
cont += 1
fd.write(header.encode('utf-8'))
fd.write('\n')
fd.write(body.encode('utf-8'))
fd.close()
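# Illustrative example (hypothetical data): createCSV expects a dict of
# equal-length column lists and writes one row per position.
#   createCSV({"author": ["alice", "bob"], "commits": [10, 7]},
#             "report/top_authors.csv")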
def init_env():
grimoirelib = path.join("..","vizgrimoire")
metricslib = path.join("..","vizgrimoire","metrics")
studieslib = path.join("..","vizgrimoire","analysis")
alchemy = path.join("..","grimoirelib_alch")
for dir in [grimoirelib,metricslib,studieslib,alchemy]:
sys.path.append(dir)
# env vars for R
environ["LANG"] = ""
environ["R_LIBS"] = "../../r-lib"
def draw(dataset, labels, output_dir):
# create charts and write down csv files with list of metrics
for metric in dataset.keys():
bar_chart(metric, labels, dataset[metric], output_dir + "/" + metric)
def update_data(data, dataset):
# dataset is a list of metrics eg: {"metric":value2}
# data contains the same list of metrics, but with previous information
# eg: {"metric":[value0, value1]}
# the output would be: {"metric":[value0, value1, value2]}
for metric in dataset.keys():
if data.has_key(metric):
data[metric].append(dataset[metric])
else:
data[metric] = [dataset[metric]]
return data
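# Illustrative example (assumed values):
#   update_data({"scm_commits": [10, 12]}, {"scm_commits": 15})
#   -> {"scm_commits": [10, 12, 15]}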
if __name__ == '__main__':
locale.setlocale(locale.LC_ALL, 'en_US')
init_env()
from vizgrimoire.metrics.metrics import Metrics
from vizgrimoire.metrics.query_builder import DSQuery, SCMQuery, QAForumsQuery, MLSQuery, SCRQuery, ITSQuery, IRCQuery
from vizgrimoire.metrics.metrics_filter import MetricFilters
import vizgrimoire.metrics.scm_metrics as scm
import vizgrimoire.metrics.qaforums_metrics as qa
import vizgrimoire.metrics.mls_metrics as mls
import vizgrimoire.metrics.scr_metrics as scr
import vizgrimoire.metrics.its_metrics as its
import vizgrimoire.metrics.irc_metrics as irc
from vizgrimoire.GrimoireUtils import createJSON
from vizgrimoire.GrimoireSQL import SetDBChannel
# parse options
opts = read_options()
# obtain list of releases by tuples [(date1, date2), (date2, date3), ...]
releases = build_releases(opts.releases)
# Projects analysis. This includes SCM, SCR and ITS.
people_out = ["OpenStack Jenkins","Launchpad Translations on behalf of nova-core","Jenkins","OpenStack Hudson","[email protected]","[email protected]","Openstack Project Creator","Openstack Gerrit","openstackgerrit"]
affs_out = ["-Bot","-Individual","-Unknown"]
labels = ["2012-S2", "2013-S1", "2013-S2", "2014-S1"]
data = {}
for release in releases:
dataset = {}
startdate = "'" + release[0] + "'"
enddate = "'" + release[1] + "'"
filters = MetricFilters("month", startdate, enddate, [], opts.npeople, people_out, affs_out)
if opts.dbcvsanaly is not None:
dbcon = SCMQuery(opts.dbuser, opts.dbpassword, opts.dbcvsanaly, opts.dbidentities)
dataset.update(scm_report(dbcon, filters, opts.output_dir))
if opts.dbbicho is not None:
dbcon = ITSQuery(opts.dbuser, opts.dbpassword, opts.dbbicho, opts.dbidentities)
dataset.update(its_report(dbcon, filters))
if opts.dbreview is not None:
dbcon = SCRQuery(opts.dbuser, opts.dbpassword, opts.dbreview, opts.dbidentities)
dataset.update(scr_report(dbcon, filters))
if opts.dbirc is not None:
dbcon = IRCQuery(opts.dbuser, opts.dbpassword, opts.dbirc, opts.dbidentities)
dataset.update(irc_report(dbcon, filters, opts.output_dir))
if opts.dbqaforums is not None:
dbcon = QAForumsQuery(opts.dbuser, opts.dbpassword, opts.dbqaforums, opts.dbidentities)
dataset.update(qaforums_report(dbcon, filters, opts.output_dir))
if opts.dbmlstats is not None:
dbcon = MLSQuery(opts.dbuser, opts.dbpassword, opts.dbmlstats, opts.dbidentities)
dataset.update(mls_report(dbcon, filters, opts.output_dir))
data = update_data(data, dataset)
createJSON(dataset, opts.output_dir + "/report.json")
draw(data, labels, opts.output_dir)
| gpl-3.0 |
tmhm/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
sammy-net/racedataviz | src/tplot.py | 1 | 18112 | #!/usr/bin/env python
# Copyright 2015 Josh Pieper, [email protected]. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import os
import sys
import time
import matplotlib
matplotlib.use('Qt4Agg')
matplotlib.rcParams['backend.qt4'] = 'PySide'
from matplotlib.backends import backend_qt4agg
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
import PySide.QtCore as QtCore
import PySide.QtGui as QtGui
SCRIPT_PATH=os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(SCRIPT_PATH, '../python'))
sys.path.append(os.path.join(SCRIPT_PATH, 'build-x86_64'))
import ui_tplot_main_window
import course_map_dialog
import rc_data
import sync_dialog
AXES = ['Left', 'Right', '3', '4']
LEGEND_LOC = {
'Left': 2,
'Right': 1,
'3': 7,
'4': 4
}
ALL_LOGS_STR = 'All'
class BoolGuard(object):
def __init__(self):
self.value = False
def __enter__(self):
self.value = True
def __exit__(self, type, value, traceback):
self.value = False
def active(self):
return self.value
def _make_timestamp_getter(all_data):
if len(all_data) == 0:
return lambda x: 0.0
sample = all_data[0]
# If any children have a timestamp field, use the first one we can
# find.
def find_child(prefix, value):
if hasattr(value, 'timestamp'):
return lambda x: _get_data(x, prefix + 'timestamp')
if not hasattr(value, '_fields'):
return None
for child in value._fields:
result = find_child(prefix + child + '.', getattr(value, child))
if result:
return result
return None
return find_child('', sample)
def _clear_tree_widget(item):
item.setText(1, '')
for i in range(item.childCount()):
child = item.child(i)
_clear_tree_widget(child)
def _set_tree_widget_data(item, records, index,
required_size=0):
if item.childCount() < required_size:
for i in range(item.childCount(), required_size):
subitem = QtGui.QTreeWidgetItem(item)
subitem.setText(0, str(i))
for i in range(item.childCount()):
child = item.child(i)
name = child.text(0)
field = records[name]
child.setText(1, str(field.records[index].value))
def _get_data(value, name):
fields = name.split('.')
for field in fields:
if isinstance(value, list):
value = value[int(field)]
else:
value = getattr(value, field)
return value
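# Illustrative example (hypothetical record layout): each dotted part is
# either an attribute name or, when the current value is a list, an integer
# index, e.g.
#   _get_data(sample, "gps.position.0")  ->  sample.gps.position[0]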
class Tplot(QtGui.QMainWindow):
def __init__(self):
super(Tplot, self).__init__()
self.ui = ui_tplot_main_window.Ui_TplotMainWindow()
self.ui.setupUi(self)
self.figure = matplotlib.figure.Figure()
self.canvas = FigureCanvas(self.figure)
self.canvas.mpl_connect('motion_notify_event', self.handle_mouse)
self.canvas.mpl_connect('key_press_event', self.handle_key_press)
self.canvas.mpl_connect('key_release_event', self.handle_key_release)
# Make QT drawing not be super slow. See:
# https://github.com/matplotlib/matplotlib/issues/2559/
def draw():
FigureCanvas.draw(self.canvas)
self.canvas.repaint()
self.canvas.draw = draw
self.left_axis = self.figure.add_subplot(111)
self.left_axis.tplot_name = 'Left'
self.axes = {
'Left' : self.left_axis,
}
layout = QtGui.QVBoxLayout(self.ui.plotFrame)
layout.addWidget(self.canvas, 1)
self.toolbar = backend_qt4agg.NavigationToolbar2QT(self.canvas, self)
self.addToolBar(self.toolbar)
self.canvas.setFocusPolicy(QtCore.Qt.ClickFocus)
self.canvas.setFocus()
self.logs = dict()
self.COLORS = 'rgbcmyk'
self.next_color = 0
self.lines = list()
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.handle_timeout)
self.time_start = None
self.time_end = None
self.time_current = None
self.ui.recordCombo.currentIndexChanged.connect(
self.handle_record_combo)
self.ui.addPlotButton.clicked.connect(self.handle_add_plot_button)
self.ui.removeButton.clicked.connect(self.handle_remove_button)
self.ui.treeWidget.itemExpanded.connect(self.handle_item_expanded)
self.tree_items = []
self.ui.treeWidget.header().setResizeMode(
QtGui.QHeaderView.ResizeToContents)
self.ui.timeSlider.valueChanged.connect(self.handle_time_slider)
self._updating_slider = BoolGuard()
self._sync_dialog = sync_dialog.SyncDialog(self)
self.ui.fastReverseButton.clicked.connect(
self.handle_fast_reverse_button)
self.ui.stepBackButton.clicked.connect(
self.handle_step_back_button)
self.ui.playReverseButton.clicked.connect(
self.handle_play_reverse_button)
self.ui.stopButton.clicked.connect(self.handle_stop_button)
self.ui.playButton.clicked.connect(self.handle_play_button)
self.ui.stepForwardButton.clicked.connect(
self.handle_step_forward_button)
self.ui.fastForwardButton.clicked.connect(
self.handle_fast_forward_button)
self.ui.actionSynchronize.triggered.connect(self._sync_dialog.show)
self._sync_dialog.time_changed.connect(self.handle_sync_changed)
self.ui.action_Quit.triggered.connect(self.close)
self.ui.action_Open.triggered.connect(self.open_dialog)
self._course_map_dialog = course_map_dialog.CourseMapDialog(self)
self._course_map_dialog.time_slider_changed.connect(
self.update_time)
self.ui.actionCourse_Map.triggered.connect(
self._course_map_dialog.show)
def open_dialog(self):
directory = ""
if len(self.logs):
directory = os.path.dirname(self.logs.values()[-1].filename)
filename = QtGui.QFileDialog.getOpenFileName(
self, "Open log file", directory)
if filename and filename[0]:
self.open(filename[0])
def open(self, filename):
try:
maybe_log = rc_data.RcData(filename)
except Exception as e:
QtGui.QMessageBox.warning(self, 'Could not open log',
'Error: ' + str(e))
return
log_name = os.path.basename(filename)
self.logs[log_name] = maybe_log
# Add the magic "all logs" item for the first log opened.
if len(self.logs) == 1:
self.ui.recordCombo.addItem(ALL_LOGS_STR)
self.ui.recordCombo.addItem(log_name)
self._sync_dialog.add_log(maybe_log)
self._course_map_dialog.add_log(log_name, maybe_log)
item = QtGui.QTreeWidgetItem()
item.setText(0, log_name)
self.ui.treeWidget.addTopLevelItem(item)
self.tree_items.append(item)
for name in self.logs[log_name].records.keys():
sub_item = QtGui.QTreeWidgetItem(item)
sub_item.setText(0, name)
def open_sync_dialog(self):
if self._sync_dialog is None:
self._sync_dialog = sync_dialog.SyncDialog()
def handle_record_combo(self):
record = self.ui.recordCombo.currentText()
if record == ALL_LOGS_STR:
# This assumes that all logs have the same fields, which
# seems likely.
record = self.logs.keys()[0]
self.ui.xCombo.clear()
self.ui.yCombo.clear()
log = self.logs[record]
default_x = None
index = [0, None]
def add_item(index, element):
name = element.name
self.ui.xCombo.addItem(name)
self.ui.yCombo.addItem(name)
if (name == rc_data.RELATIVE_TIME_FIELD or
name == rc_data.RELATIVE_DISTANCE_FIELD):
index[1] = index[0]
index[0] += 1
for item in log.records.itervalues():
add_item(index, item)
default_x = index[1]
if default_x:
self.ui.xCombo.setCurrentIndex(default_x)
def handle_add_plot_button(self):
record = self.ui.recordCombo.currentText()
xname = self.ui.xCombo.currentText()
yname = self.ui.yCombo.currentText()
if record == ALL_LOGS_STR:
for record_name in self.logs.iterkeys():
self.add_plot(record_name, xname, yname)
else:
self.add_plot(record, xname, yname)
def add_plot(self, record, xname, yname):
log = self.logs[record]
xdata = log.all(xname)
ydata = log.all(yname)
line = matplotlib.lines.Line2D(xdata, ydata)
line.tplot_record_name = record
line.tplot_has_timestamp = True
line.tplot_xname = xname
line.tplot_yname = yname
label = self.make_label(record, xname, yname)
line.set_label(label)
line.set_color(self.COLORS[self.next_color])
self.next_color = (self.next_color + 1) % len(self.COLORS)
self.lines.append(line)
axis = self.get_current_axis()
axis.add_line(line)
axis.relim()
axis.autoscale_view()
axis.legend(loc=LEGEND_LOC[axis.tplot_name])
self.ui.plotsCombo.addItem(label, line)
self.ui.plotsCombo.setCurrentIndex(self.ui.plotsCombo.count() - 1)
self.canvas.draw()
def make_label(self, record, xname, yname):
if xname == 'timestamp':
return '%s.%s' % (record, yname)
return '%s %s vs. %s' % (record, yname, xname)
def get_current_axis(self):
requested = self.ui.axisCombo.currentText()
maybe_result = self.axes.get(requested, None)
if maybe_result:
return maybe_result
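        # Lazily create a new y-axis sharing the x-axis with the left axis for this name.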
result = self.left_axis.twinx()
self.axes[requested] = result
result.tplot_name = requested
return result
def get_all_axes(self):
return self.axes.values()
def handle_remove_button(self):
index = self.ui.plotsCombo.currentIndex()
if index < 0:
return
line = self.ui.plotsCombo.itemData(index)
if hasattr(line, 'tplot_marker'):
line.tplot_marker.remove()
line.remove()
self.ui.plotsCombo.removeItem(index)
self.canvas.draw()
def handle_item_expanded(self):
self.update_timeline()
def handle_sync_changed(self):
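        # The sync offsets changed, so refresh the relative time/distance data backing each plotted line.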
for line in self.lines:
if (line.tplot_xname == rc_data.RELATIVE_TIME_FIELD or
line.tplot_xname == rc_data.RELATIVE_DISTANCE_FIELD):
line.set_xdata(
self.logs[line.tplot_record_name].all(line.tplot_xname))
if (line.tplot_yname == rc_data.RELATIVE_TIME_FIELD or
line.tplot_yname == rc_data.RELATIVE_DISTANCE_FIELD):
line.set_ydata(
self.logs[line.tplot_record_name].all(line.tplot_yname))
self.update_timeline()
self.canvas.draw()
self._course_map_dialog.update_sync()
def update_timeline(self):
if self.time_start is not None:
return
# Look through all of the logs and find the minimum and
# maximum timestamp of each.
for name, log in self.logs.iteritems():
these_times = log.relative_times()
if len(these_times) == 0:
continue
this_min = min(these_times)
this_max = max(these_times)
if self.time_start is None or this_min < self.time_start:
self.time_start = this_min
if self.time_end is None or this_max > self.time_end:
self.time_end = this_max
self.time_current = self.time_start
self.update_time(self.time_current, update_slider=False)
def handle_mouse(self, event):
if not event.inaxes:
return
self.statusBar().showMessage('%f,%f' % (event.xdata, event.ydata))
def handle_key_press(self, event):
if event.key not in ['1', '2', '3', '4']:
return
index = ord(event.key) - ord('1')
for key, value in self.axes.iteritems():
if key == AXES[index]:
value.set_navigate(True)
else:
value.set_navigate(False)
def handle_key_release(self, event):
if event.key not in ['1', '2', '3', '4']:
return
for key, value in self.axes.iteritems():
value.set_navigate(True)
def update_time(self, new_time, update_slider=True):
new_time = max(self.time_start, min(self.time_end, new_time))
self.time_current = new_time
# Update the tree view.
self.update_tree_view(new_time)
# Update dots on the plot.
self.update_plot_dots(new_time)
# Update the text fields.
# TODO sammy find some reasonable way to display something here.
# dt = datetime.datetime.utcfromtimestamp(new_time)
# self.ui.clockEdit.setText('%04d-%02d-%02d %02d:%02d:%02.3f' % (
# dt.year, dt.month, dt.day,
# dt.hour, dt.minute, dt.second + dt.microsecond / 1e6))
self.ui.elapsedEdit.setText('%.3f' % (new_time))
self._course_map_dialog.update_time(new_time)
if update_slider:
with self._updating_slider:
elapsed = new_time - self.time_start
total_time = self.time_end - self.time_start
self.ui.timeSlider.setValue(
int(1000 * elapsed / total_time))
def handle_time_slider(self):
if self._updating_slider.active():
return
if self.time_end is None or self.time_start is None:
return
total_time = self.time_end - self.time_start
current = self.ui.timeSlider.value() / 1000.0
self.update_time(self.time_start + current * total_time,
update_slider=False)
def update_tree_view(self, time):
for item in self.tree_items:
name = item.text(0)
log = self.logs[name]
this_time_index = log.relative_index(time)
if this_time_index is None:
_clear_tree_widget(item)
else:
_set_tree_widget_data(item, log.records, this_time_index)
def update_plot_dots(self, new_time):
updated = False
for axis in self.get_all_axes():
for line in axis.lines:
if not hasattr(line, 'tplot_record_name'):
continue
if not hasattr(line, 'tplot_has_timestamp'):
continue
log = self.logs[line.tplot_record_name]
this_time_index = log.relative_index(new_time)
if this_time_index is None:
continue
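                # Create the time-cursor dot for this line on first use.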
if not hasattr(line, 'tplot_marker'):
line.tplot_marker = matplotlib.lines.Line2D([], [])
line.tplot_marker.set_marker('o')
line.tplot_marker.set_color(line._color)
self.left_axis.add_line(line.tplot_marker)
updated = True
xdata = log.value_at(line.tplot_xname, this_time_index)
ydata = log.value_at(line.tplot_yname, this_time_index)
line.tplot_marker.set_data(xdata, ydata)
if updated:
self.canvas.draw()
def handle_fast_reverse_button(self):
self.play_start(-self.ui.fastReverseSpin.value())
def handle_step_back_button(self):
self.play_stop()
self.update_time(self.time_current - self.ui.stepBackSpin.value())
def handle_play_reverse_button(self):
self.play_start(-1.0)
def handle_stop_button(self):
self.play_stop()
def handle_play_button(self):
self.play_start(1.0)
def handle_step_forward_button(self):
self.play_stop()
self.update_time(self.time_current + self.ui.stepForwardSpin.value())
def handle_fast_forward_button(self):
self.play_start(self.ui.fastForwardSpin.value())
def play_stop(self):
self.speed = None
self.last_time = None
self.timer.stop()
def play_start(self, speed):
self.speed = speed
self.last_time = time.time()
self.timer.start(100)
def handle_timeout(self):
if self.time_current is None:
self.update_timeline()
assert self.last_time is not None
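        # Advance playback by the elapsed wall-clock time scaled by the playback speed (negative when playing in reverse).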
this_time = time.time()
delta_t = this_time - self.last_time
self.last_time = this_time
self.update_time(self.time_current + delta_t * self.speed)
def main():
parser = argparse.ArgumentParser(description="Plot data from RaceCapture")
parser.add_argument("logfiles", nargs="*", help="Logfiles to add to plot")
parser.add_argument("--sync", nargs=2, metavar=('name', 'value'),
help="Channel name and value to use for "
"synchronization trigger")
args = parser.parse_args(sys.argv[1:])
app = QtGui.QApplication(sys.argv)
app.setApplicationName('tplot')
tplot = Tplot()
tplot.show()
for filename in args.logfiles:
tplot.open(filename)
if args.sync is not None:
tplot._sync_dialog.apply_trigger(args.sync[0], float(args.sync[1]))
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| gpl-3.0 |
michaelpacer/networkx | examples/drawing/labels_and_colors.py | 44 | 1330 | #!/usr/bin/env python
"""
Draw a graph with matplotlib, color by degree.
You must have matplotlib for this to work.
"""
__author__ = """Aric Hagberg ([email protected])"""
import matplotlib.pyplot as plt
import networkx as nx
G=nx.cubical_graph()
pos=nx.spring_layout(G) # positions for all nodes
# nodes
nx.draw_networkx_nodes(G,pos,
nodelist=[0,1,2,3],
node_color='r',
node_size=500,
alpha=0.8)
nx.draw_networkx_nodes(G,pos,
nodelist=[4,5,6,7],
node_color='b',
node_size=500,
alpha=0.8)
# edges
nx.draw_networkx_edges(G,pos,width=1.0,alpha=0.5)
nx.draw_networkx_edges(G,pos,
edgelist=[(0,1),(1,2),(2,3),(3,0)],
width=8,alpha=0.5,edge_color='r')
nx.draw_networkx_edges(G,pos,
edgelist=[(4,5),(5,6),(6,7),(7,4)],
width=8,alpha=0.5,edge_color='b')
# some math labels
labels={}
labels[0]=r'$a$'
labels[1]=r'$b$'
labels[2]=r'$c$'
labels[3]=r'$d$'
labels[4]=r'$\alpha$'
labels[5]=r'$\beta$'
labels[6]=r'$\gamma$'
labels[7]=r'$\delta$'
nx.draw_networkx_labels(G,pos,labels,font_size=16)
plt.axis('off')
plt.savefig("labels_and_colors.png") # save as png
plt.show() # display
| bsd-3-clause |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/multiple_features/results/test10_cross_validate_categories_1200ms_scaled_method_v_force_area.py | 1 | 4711 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 82:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
if __name__ == '__main__':
Fmat = np.row_stack([Fmat_original[0:41,:], Fmat_original[41:82,:]])
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:20]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but sometimes useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
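    # 2-nearest-neighbor classifier evaluated with leave-one-chunk-out cross-validation, so repeated trials of the same object stay together in a fold.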
clf = kNN(k=2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True')
#show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,30.3,0,1.2])
grid('True')
show()
| mit |
johnmgregoire/JCAPdatavis | echem_stacked_tern4.py | 1 | 51062 | import matplotlib.cm as cm
import numpy
import pylab
import h5py, operator, copy, os, csv, sys
from echem_plate_fcns import *
from echem_plate_math import *
PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(os.path.join(PyCodePath,'ternaryplot'))
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
from quaternary_FOM_stackedtern2 import *
from quaternary_FOM_stackedtern30 import *
from quaternary_FOM_bintern import *
#os.chdir(cwd)
pylab.rc('font', family='serif', serif='Times New Roman')
elkeys=['A', 'B', 'C', 'D']
SYSTEM=67
#29,34,39
pointsize=20
opacity=.6
view_azim=-159
view_elev=30
labelquat=True
#permuteelements=[1, 2, 0, 3]
permuteelements=[0, 1, 2, 3]
allposn=True
xshift=0.
yshift=0.
allmeasurements=False
if SYSTEM==0:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop')
rootstr='20120728NiFeCoTiplate'
#expstr='CV2V_Ithresh'
#fomlabel='Potential for 0.1mA (V vs H$_2$0/O$_2$)'
#fomshift=-.2
#vmin=.3
#vmax=.6
fommult=1.
savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20120728NiFeCoTi_allplateresults'
binarylegloc=1
elif SYSTEM==1:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop')
rootstr='20120728NiFeCoTiplate'
expstr='CP1Ess'
fomlabel='Potential for 0.02mA (V vs H$_2$0/O$_2$)'
fomshift=-.2
fommult=1.
vmin=.21
vmax=.44
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='.5'
savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20120728NiFeCoTi_allplateresults'
binarylegloc=9
elif SYSTEM==2:
ellabels=['Ni', 'La', 'Co', 'Ce']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/DropEchem_Aug28_Sep14_2012_results')
rootstr='2012-9_NiLaCoCe'
expstr='CV2Imax'
fomlabel='max I in CV (mA)'
fomshift=0.
fommult=1000.
vmin=.03
vmax=.54
cmap=cm.jet
aboverangecolstr='.5'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), rootstr)
binarylegloc=1
elif SYSTEM==222:
ellabels=['Ni', 'La', 'Co', 'Ce']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/DropEchem_Aug28_Sep14_2012_results')
rootstr='2012-9_NiLaCoCe'
expstr='CP4Ess'
fomlabel='Potential for 0.1mA (V vs H$_2$0/O$_2$)'
fomshift=-.2
fommult=1.
vmin=.34
vmax=.44
cmap=cm.jet_r
aboverangecolstr='.5'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), rootstr)
binarylegloc=1
elif SYSTEM==3:
ellabels=['Fe', 'Ni', 'Zr', 'Ga']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/DropEchem_Aug28_Sep14_2012_results')
rootstr='2012-08_FeNiZrGa'
expstr='CP1Ess'
fomlabel='Potential for 0.02mA (V vs H$_2$0/O$_2$)'
fomshift=-.2
fommult=1.
vmin=.2
vmax=.6
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='.5'
savefolder=os.path.join(os.getcwd(), rootstr)
binarylegloc=9
elif SYSTEM==4:
ellabels=['Fe', 'Ni', 'Mg', 'Zr']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/DropEchem_Aug28_Sep14_2012_results')
rootstr='2012-8_FeNiMgZr'
expstr='CP1Ess'
fomlabel='Potential for 0.02mA (V vs H$_2$0/O$_2$)'
fomshift=-.2
fommult=1.
vmin=.2
vmax=.6
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='.5'
savefolder=os.path.join(os.getcwd(), rootstr)
binarylegloc=9
elif SYSTEM==5:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/DropEchem_Aug28_Sep14_2012_results')
rootstr='2012-9_FeCoNiTi500'
expstr='CV2Imax'
fomlabel='max I in CV (mA)'
fomshift=0.
fommult=1000.
vmin=.01
vmax=.17
cmap=cm.jet
aboverangecolstr='.5'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), rootstr)
binarylegloc=1
elkeys=ellabels
elif SYSTEM==6:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate1')
rootstr='2012-9_FeCoNiTi500'
expstr='CV2V_Ithresh'
fomlabel='V to reach 2E-5 A in CV (V vs H$_2$0/O$_2$)'
fomshift=-.2
fommult=1.
vmin=.35
vmax=.45
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='.3'
savefolder=os.path.join(os.getcwd(), rootstr)
binarylegloc=1
elkeys=ellabels
elif SYSTEM==7:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate1')
rootstr='2012-9_FeCoNiTi500C_fastCV'
expstr='CV2V_Ithresh'
fomlabel='V to reach 2E-5 A in CV (V vs H$_2$0/O$_2$)'
fomshift=-.2
fommult=1.
vmin=.35
vmax=.65
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='.3'
savefolder=os.path.join(os.getcwd(), '2012-9_FeCoNiTi500')
binarylegloc=1
elkeys=ellabels
elif SYSTEM==8:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastinit_plate1')
rootstr='2012-9_FeCoNiTi_500C_fast_plate1'
expstr='I500mVoverpotLinSub'
fomlabel='I at 500mV in LinSub CV (mA)'
fomshift=0.
fommult=1000.
vmin=.03
vmax=.3
cmap=cm.jet
aboverangecolstr='k'
belowrangecolstr='.3'
savefolder=os.getcwd()#os.path.join(os.getcwd(), '2012-9_FeCoNiTi500')
binarylegloc=1
elkeys=ellabels
elif SYSTEM==9:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate1')
rootstr='2012-9_FeCoNiTi_500C_fast_plate1'
expstr='V_IthreshCVLinSub'
fomlabel='V to reach 1E-4 A in CV (V vs H$_2$0/O$_2$)'
fomshift=-.2
fommult=1.
vmin=.42
vmax=.6
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='.3'
savefolder=os.path.join(os.getcwd(), '2012-9_FeCoNiTi500')
binarylegloc=1
elkeys=ellabels
elif SYSTEM==10:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate1')
rootstr='2012-9_FeCoNiTi_500C_fastCP_plate1'
expstr='CP1Ess'
fomlabel='V from CP at 1E-4 A (V vs H$_2$0/O$_2$)'
fomshift=-.24
fommult=1.
vmin=.42
vmax=.6
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='.3'
savefolder=os.path.join(os.getcwd(), '2012-9_FeCoNiTi500')
binarylegloc=1
elkeys=ellabels
elif SYSTEM==11:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
rootstr='2012-9_FeCoNiTi_500C_fast_'
os.chdir(os.path.join('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/'))
expstr='I500mVoverpotLinSub'
fomlabel='I at 500mV in LinSub CV ($\mu$A)'
fomshift=0.
fommult=1.e6
vmin=30.
vmax=252.
cmap=cm.jet
aboverangecolstr='k'
belowrangecolstr='.3'
savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_graphs1'
binarylegloc=1
elkeys=ellabels
allposn=False
view_azim=-40
view_elev=2
elif SYSTEM==12:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
rootstr='2012-9_FeCoNiTi_500C_fastrep2_plate1'
os.chdir(os.path.join('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/', rootstr))
expstr='I500mVoverpotLinSub'
fomlabel='I at 500mV in LinSub CV (mA)'
fomshift=0.
fommult=1.e6
vmin=30.
vmax=252.
cmap=cm.jet
aboverangecolstr=''
belowrangecolstr='.3'
savefolder=os.getcwd()#os.path.join(os.getcwd(), '2012-9_FeCoNiTi500')
binarylegloc=1
elkeys=ellabels
elif SYSTEM==13:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
rootstr='2012-9_FeCoNiTi_500C_fastrep3_plate1'
os.chdir(os.path.join('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/', rootstr))
expstr='I500mVoverpotLinSub'
fomlabel='I at 500mV in LinSub CV (mA)'
fomshift=0.
fommult=1.e6
vmin=30.
vmax=252.
cmap=cm.jet
aboverangecolstr=''
belowrangecolstr='.3'
savefolder=os.getcwd()#os.path.join(os.getcwd(), '2012-9_FeCoNiTi500')
binarylegloc=1
elkeys=ellabels
elif SYSTEM==14:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
rootstr='2012-9_FeCoNiTi_500C_fast_plate1'
os.chdir(os.path.join('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/', rootstr))
expstr='I500mVoverpotLinSub'
fomlabel='I at 500mV in LinSub CV (mA)'
fomshift=0.
fommult=1000.
vmin=.03
vmax=.3
cmap=cm.jet
aboverangecolstr='k'
belowrangecolstr='.3'
savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_graphs2'
binarylegloc=1
elkeys=ellabels
elif SYSTEM==15:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate1')
rootstr='2012-9_FeCoNiTi_500C_fast_plate1'
expstr='E_dIdEcrit'
fomlabel='V to reach 5E-4 mA/V in CV (V vs H$_2$0/O$_2$)'
fomshift=-.24
fommult=1.
vmin=.36
vmax=.505
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='.3'
savefolder=os.getcwd()
binarylegloc=1
elkeys=ellabels
elif SYSTEM==16:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate1')
rootstr='2012-9_FeCoNiTi_500C_fast_plate1'
expstr='dIdE_aveabovecrit'
fomlabel='ave dI/dE above crit (mA/V)'
fomshift=0.
fommult=1000.
vmin=.51
vmax=2.15
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.getcwd()
binarylegloc=1
elkeys=ellabels
elif SYSTEM==17:
ellabels=['Fe', 'Co', 'Ni', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate1')
rootstr='2012-9_FeCoNiTi_500C_fast_plate1'
expstr='dIdEmax'
fomlabel='max dI/dE mA/V'
fomshift=0.
fommult=1000.
vmin=.4
vmax=15.52
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.getcwd()
binarylegloc=1
elkeys=ellabels
elif SYSTEM==20:
ellabels=['Ni', 'Fe', 'Co', 'Al']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/201210_results/20121002NiFeCoAl_CPCVill_Plate1')
rootstr='20121002NiFeCoAl'
expstr='CV5Imax'
fomlabel='max I in CV (mA)'
fomshift=0.
fommult=1000.
vmin=.055
vmax=2.2
cmap=cm.jet
aboverangecolstr='.5'
belowrangecolstr='k'
savefolder=os.path.join(os.path.split(os.getcwd())[0], rootstr)
binarylegloc=1
elkeys=['Fe', 'Ni', 'Ti', 'Co']
elif SYSTEM==21:
ellabels=['Ni', 'Fe', 'Co', 'Al']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/201210_results/20121002NiFeCoAl_CPCVill_Plate1')
rootstr='20121002NiFeCoAl'
expstr='CP1Ess'
fomlabel='V from CP at 1E-4 A (V vs H$_2$0/O$_2$)'
fomshift=-.24
fommult=1.
vmin=.22
vmax=.502
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='.3'
savefolder=os.path.join(os.path.split(os.getcwd())[0], rootstr)
binarylegloc=1
elkeys=['Fe', 'Ni', 'Ti', 'Co']
elif SYSTEM==22:
ellabels=['Ni', 'Fe', 'Co', 'Al']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121108NiFeCoAl_F/results')
rootstr='plate'
expstr='CP1Efin'
fomlabel='V from CP at 1E-4 A (V vs H$_2$0/O$_2$)'
fomshift=-.177
fommult=1.
vmin=.19
vmax=.5
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='.3'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Al']
elif SYSTEM==23:
ellabels=['Ni', 'Fe', 'Co', 'Al']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121108NiFeCoAl_F/results')
rootstr='plate'
expstr='V_IthreshCVLinSub'
fomlabel='V to reach 1E-4A in LinSub CV (V vs H$_2$0/O$_2$)'
fomshift=-.177
fommult=1.
vmin=.19
vmax=.5
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='.3'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Al']
elif SYSTEM==24:
ellabels=['Ni', 'Fe', 'Co', 'Al']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121108NiFeCoAl_F/results')
rootstr='plate'
expstr='ImaxCVLinSub'
fomlabel='max I in LinSub CV (mA)'
fomshift=0.
fommult=1000.
vmin=.3
vmax=3.2
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Al']
elif SYSTEM==25:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='I650mVLinSub'
fomlabel='I in LinSub CV at 650mV (mA)'
fomshift=0.
fommult=1000.
vmin=.3
vmax=2.9
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
elif SYSTEM==26:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='ImaxCVLinSub'
fomlabel='Imax in LinSub CV (mA)'
fomshift=0.
fommult=1000.
vmin=.3
vmax=2.9
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
elif SYSTEM==27:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='CV6fwdImax'
fomlabel='Imax in CV (mA)'
fomshift=0.
fommult=1000.
vmin=.3
vmax=2.9
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
elif SYSTEM==28:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='I677mVLinSub'
fomlabel='I at 500mV (O2/H2O) in LinSub CV (mA)'
fomshift=0.
fommult=1000.
vmin=.1
vmax=2.15
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
elif SYSTEM==29:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='I627mVLinSub'
fomlabel='$J_\mathrm{C}$ at $V_{OER}$ = 450mV (mA cm$^{-2}$)'
fomshift=0.
fommult=1.e5
vmin=2
vmax=101.3
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
view_azim=214
view_elev=24
elif SYSTEM==30:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='I577mVLinSub'
fomlabel='I at 400mV (O2/H2O) in LinSub CV (mA)'
fomshift=0.
fommult=1000.
vmin=.02
vmax=.3
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
elif SYSTEM==31:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='V_IthreshCVLinSub_100'
fomlabel='V to reach 1E-4A in LinSub CV (V vs H$_2$0/O$_2$)'
fomshift=-.177
fommult=1.
vmin=.37
vmax=.511
cmap=cm.jet_r
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
elif SYSTEM==32:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='V_IthreshCVLinSub_200'
fomlabel='V to reach 2E-4A in LinSub CV (V vs H$_2$0/O$_2$)'
fomshift=-.177
fommult=1.
vmin=.385
vmax=.511
cmap=cm.jet_r
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
elif SYSTEM==33:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='UVvis_T_400_925'
fomlabel='frac Trans from 400-925nm'
fomshift=0.
fommult=1.
vmin=.2#.16
vmax=1.006#1.005
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
elif SYSTEM==34:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='UVvis_Ten_400_925_am1.5'
#fomlabel='frac AM1.5 en trans from 400-925nm'
fomlabel='$\eta_{\mathrm{C},T}$ , transmission efficiency'
fomshift=0.
fommult=1.
vmin=.19
vmax=1.01
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
view_azim=214
view_elev=24
elif SYSTEM==35:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='UVvis_Ten_400_925_am15__I677mV'
fomlabel='frac TransEn AM1.5 400-925nm * curr at 500mV overpot.'
fomshift=0.
fommult=1000.
vmin=.1
vmax=1.78
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
elif SYSTEM==36:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='UVvis_Ten_400_925_am15__I627mV'
fomlabel='frac TransEn AM1.5 400-925nm * curr at 450mV overpot.'
fomshift=0.
fommult=1000.
vmin=.01
vmax=.9
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
elif SYSTEM==37:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='UVvis_Ten_400_925_am15__I577mV'
fomlabel='frac TransEn AM1.5 400-925nm * curr at 400mV overpot.'
fomshift=0.
fommult=1000.
vmin=.01
vmax=0.28
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
elif SYSTEM==38:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='UVvis_Ten_400_925_am15__0.3cutI627mV'
fomlabel='frac TransEn AM1.5 400-925nm * frac of 0.3mA at 450mV overpot.'
fomshift=0.
fommult=1.
vmin=.1
vmax=1.01
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
elif SYSTEM==39:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='UVvis_Ten_400_925_am15__0.23cutI627mV'
#fomlabel='frac TransEn AM1.5 400-925nm * frac of 0.23mA at 450mV overpot.'
fomlabel='$\eta_{\mathrm{C}}$ , catalytic and optical efficiency'
fomshift=0.
fommult=1.
vmin=.1
vmax=1.01
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
view_azim=214
view_elev=24
elif SYSTEM==40:
ellabels=['Ni', 'Fe', 'Co', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results')
rootstr='plate'
expstr='UVvis_Ten_400_925_am15__0.1cutI577mV'
fomlabel='frac TransEn AM1.5 400-925nm * frac of 0.1mA at 400mV overpot.)'
fomshift=0.
fommult=1.
vmin=.1
vmax=1.01
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ti']
elif SYSTEM==41:
ellabels=['Bi', 'V', 'Ni', 'Fe']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/201212_BiVNiFe/results')
rootstr='.txt'
expstr='CA5Iphoto'
fomlabel='photocurrent Fe2/3 shorted to Pt (mA)'
fomshift=0.
fommult=1000.
vmin=-.008
vmax=.1
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
binarylegloc=1
elkeys=['Bi', 'V', 'Ni', 'Fe']
elif SYSTEM==42:
ellabels=['Bi', 'V', 'Ni', 'Fe']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/201212_BiVNiFe/results')
rootstr='.txt'
expstr='OCV0Ephoto'
fomlabel='illuminated OCV shift, in Fe2/3 wrt Pt (mV)'
fomshift=0.
fommult=1000.
vmin=-70
vmax=10
cmap=cm.jet_r
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
binarylegloc=1
elkeys=['Bi', 'V', 'Ni', 'Fe']
elif SYSTEM==43:
ellabels=['Bi', 'V', 'Ni', 'Fe']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/201212_BiVNiFe/results')
rootstr='.txt'
expstr='OCV0Ess'
fomlabel='illuminated OCV in Fe2/3 wrt Pt (mV)'
fomshift=0.
fommult=1000.
vmin=-20
vmax=5
cmap=cm.jet_r
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
binarylegloc=1
elkeys=['Bi', 'V', 'Ni', 'Fe']
elif SYSTEM==44:
ellabels=['Bi', 'V', 'Ni', 'Fe']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/201212_BiVNiFe/results')
rootstr='.txt'
expstr='OCV0Efin'
fomlabel='illuminated OCV in Fe2/3 wrt Pt (mV)'
fomshift=0.
fommult=1000.
vmin=-80
vmax=5
cmap=cm.jet_r
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
binarylegloc=1
elkeys=['Bi', 'V', 'Ni', 'Fe']
elif SYSTEM==45:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/NiFeCoCe plates 1M NaOH/results')
rootstr='201304'
expstr='CP1Efin'
fomlabel='V for 10 mA/cm$^2$ (V vs H$_2$0/O$_2$)'
fomshift=-.187
fommult=1.
vmin=.33
vmax=.43
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='.3'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
elif SYSTEM==46:
ellabels=['Ni', 'Fe', 'Co', 'La']
os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoLa/results')
rootstr=''
expstr='ImaxCVLinSub'
fomlabel='max I (mA/cm$^2$)'
fomshift=0.
fommult=100000.
vmin=2
vmax=165
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'La']
elif SYSTEM==47:
ellabels=['Ni', 'Fe', 'Co', 'La']
os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoLa/results')
rootstr='_I350mVLinSub.txt'
expstr='I350mVLinSub'
fomlabel='I at 350mV vs E$_{OER}$ (mA/cm$^2$)'
fomshift=0.
fommult=100000.
vmin=1
vmax=30
cmap=cm.jet
aboverangecolstr='pink'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'La']
elif SYSTEM==48:
ellabels=['Ni', 'Fe', 'Co', 'La']
os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoLa/results')
rootstr='_I400mVLinSub.txt'
expstr='I400mVLinSub'
fomlabel='I at 400mV vs E$_{OER}$ (mA/cm$^2$)'
fomshift=0.
fommult=100000.
vmin=1
vmax=100
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'La']
elif SYSTEM==50:
ellabels=['Ni', 'Fe', 'Ce', 'La']
os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCeLa/results')
rootstr='_I350mVLinSub.txt'
expstr='I350mVLinSub'
fomlabel='I at 350mV vs E$_{OER}$ (mA/cm$^2$)'
fomshift=0.
fommult=100000.
vmin=1
vmax=30
cmap=cm.jet
aboverangecolstr='pink'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
elif SYSTEM==51:
ellabels=['Ni', 'Fe', 'Ce', 'La']
os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCeLa/results')
rootstr='_I400mVLinSub.txt'
expstr='I400mVLinSub'
fomlabel='I at 400mV vs E$_{OER}$ (mA/cm$^2$)'
fomshift=0.
fommult=100000.
vmin=1
vmax=100
cmap=cm.jet
aboverangecolstr='pink'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
elif SYSTEM==53:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/results')
rootstr='_I350mVLinSub.txt'
expstr='I350mVLinSub'
fomlabel='I at 350mV vs E$_{OER}$ (mA/cm$^2$)'
fomshift=0.
fommult=100000.
vmin=1
vmax=10
cmap=cm.jet
aboverangecolstr='pink'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
elif SYSTEM==54:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/results')
rootstr='_I400mVLinSub.txt'
expstr='I400mVLinSub'
fomlabel='I at 400mV vs E$_{OER}$ (mA/cm$^2$)'
fomshift=0.
fommult=100000.
vmin=1
vmax=42
cmap=cm.jet
aboverangecolstr='pink'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
elif SYSTEM==55:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/results')
rootstr='_V_IthreshCVLinSub_30.txt'
expstr='V_IthreshCVLinSub_30'
fomlabel='E for 3mA/cm$^2$ (mV vs E$_{OER}$)'
fomshift=0.
fommult=1000.
vmin=280
vmax=400
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='pink'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
elif SYSTEM==56:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/results')
rootstr='_V_IthreshCVLinSub_100.txt'
expstr='V_IthreshCVLinSub_100'
fomlabel='E for 10mA/cm$^2$ (mV vs E$_{OER}$)'
fomshift=0.
fommult=1000.
vmin=350
vmax=440
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='pink'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
elif SYSTEM==561:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/results')
rootstr='_CP1Eave.txt'
expstr='CP1Eave'
fomlabel='E for 10mA/cm$^2$ (mV vs E$_{OER}$)'
fomshift=-(.187-.045)
fommult=1000.
vmin=375
vmax=475
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='pink'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
elif SYSTEM==57:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/results')
rootstr='_V_IthreshCVLinSub_300.txt'
expstr='V_IthreshCVLinSub_300'
fomlabel='E for 30mA/cm$^2$ (mV vs E$_{OER}$)'
fomshift=0.
fommult=1000.
vmin=380
vmax=440
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='pink'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
elif SYSTEM==58:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/results')
rootstr='_TafelSlopeVperdec.txt'
expstr='TafelSlopeVperdec'
fomlabel='Tafel mV/decade'
fomshift=0.
fommult=1000.
vmin=25.
vmax=105.
cmap=cm.jet_r
aboverangecolstr='pink'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
elif SYSTEM==59:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/results')
rootstr='_TafelLogExCurrent.txt'
expstr='TafelLogExCurrent'
fomlabel='Tafel Log$_{10}$ I$_{ex}$/A'
fomshift=0.
fommult=1.
vmin=-18.
vmax=-8.
cmap=cm.jet
aboverangecolstr='pink'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
elif SYSTEM==60:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/parsedresults/fom0.04_plate123')
rootstr='V_IthreshCVLinSub_100'
expstr='V_IthreshCVLinSub_100'
fomlabel='E for 10mA/cm$^2$ (mV vs E$_{OER}$)'
fomshift=0.
fommult=1000.
vmin=350
vmax=440
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='pink'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
elif SYSTEM==61:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results')
rootstr='V_IthreshCVLinSub_10.txt'
expstr='V_IthreshCVLinSub_10'
fomlabel='E for 1mA/cm$^2$ (mV vs E$_{OER}$)'
fomshift=0.
fommult=1000.
vmin=290
vmax=440
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='pink'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
view_azim=-151
view_elev=20
elif SYSTEM==62:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results')
rootstr='V_IthreshCVLinSub_30.txt'
expstr='V_IthreshCVLinSub_30'
fomlabel='E for 3mA/cm$^2$ (mV vs E$_{OER}$)'
fomshift=0.
fommult=1000.
vmin=310
vmax=440
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='pink'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
view_azim=-151
view_elev=20
elif SYSTEM==63:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results')
rootstr='V_IthreshCVLinSub_100.txt'
expstr='V_IthreshCVLinSub_100'
fomlabel='E for 10mA/cm$^2$ (mV vs E$_{OER}$)'
fomshift=0.
fommult=1000.
vmin=354
vmax=440
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='pink'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
view_azim=-151
view_elev=20
elif SYSTEM==64:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results')
rootstr='V_IthreshCVLinSub_300.txt'
expstr='V_IthreshCVLinSub_300'
fomlabel='E for 30mA/cm$^2$ (mV vs E$_{OER}$)'
fomshift=0.
fommult=1000.
vmin=389
vmax=440
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='pink'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
view_azim=-151
view_elev=20
elif SYSTEM==65:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results')
rootstr='V_IthreshCVLinSub_1000.txt'
expstr='V_IthreshCVLinSub_1000'
fomlabel='E for 100mA/cm$^2$ (mV vs E$_{OER}$)'
fomshift=0.
fommult=1000.
vmin=400
vmax=440
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='pink'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
view_azim=-151
view_elev=20
elif SYSTEM==66:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results')
rootstr='ImaxCVLinSub.txt'
expstr='ImaxCVLinSub'
fomlabel='max I in CV (mA)'
fomshift=0.
fommult=1000.
vmin=.0
vmax=1.1
cmap=cm.jet
aboverangecolstr='pink'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
view_azim=-151
view_elev=20
elif SYSTEM==67:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results')
rootstr='CP4Eave.txt'
expstr='CP4Eave'
fomlabel='E for 10mA/cm$^2$ via CP (mV vs E$_{OER}$)'
fomshift=-(.187-.044)
fommult=1000.
vmin=355
vmax=440
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='pink'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
view_azim=-151
view_elev=20
elif SYSTEM==68:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results')
rootstr='CP5Eave.txt'
expstr='CP5Eave'
fomlabel='E for 1mA/cm$^2$ via CP (mV vs E$_{OER}$)'
fomshift=-(.187-.044)
fommult=1000.
vmin=320
vmax=440
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='pink'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
view_azim=-151
view_elev=20
elif SYSTEM==69:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results')
rootstr='CP6Eave.txt'
expstr='CP6Eave'
fomlabel='E for 19mA/cm$^2$ via CP (mV vs E$_{OER}$)'
fomshift=-(.187-.046)
fommult=1000.
vmin=385
vmax=520
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr='pink'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
view_azim=-151
view_elev=20
elif SYSTEM==70:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results')
rootstr='TafelSlopeVperdec.txt'
expstr='TafelSlopeVperdec'
fomlabel='Tafel mV/decade from CV'
fomshift=0.
fommult=1000.
vmin=40
vmax=125
cmap=cm.jet
aboverangecolstr='pink'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
view_azim=-151
    view_elev=20
elif SYSTEM==71:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results')
rootstr='TafelCPSlopeVperdec_filterR2.txt'
expstr='TafelCPSlopeVperdec'
    fomlabel='Tafel mV/decade from CP'
fomshift=0.
fommult=1000.
vmin=40
vmax=125
cmap=cm.jet
aboverangecolstr='pink'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
view_azim=-151
view_elev=20
elif SYSTEM==80:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropAnalyzedData/FCVdata/20130523 NiFeCoCe_3V_FCV_full_plate_4835')
rootstr='Capac.txt'
expstr='Capac'
    fomlabel='Capacitance ($\mu$F)'
fomshift=0.
fommult=1.e6
vmin=4
vmax=100
cmap=cm.jet
aboverangecolstr='pink'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['A', 'B', 'C', 'D']
elif SYSTEM==81:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropAnalyzedData/FCVdata/20130523 NiFeCoCe')
rootstr='Capac_filterR2fwdrevratio.txt'
expstr='Capac'
fomlabel='Capacitance ($\mu$F)'
fomshift=0.
fommult=1.e6
vmin=4
vmax=120
cmap=cm.jet
aboverangecolstr='pink'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
elif SYSTEM==82:
ellabels=['Ni', 'Fe', 'Co', 'Ce']
os.chdir('C:/Users/Public/Documents/EchemDropAnalyzedData/FCVdata/20130523 NiFeCoCe')
rootstr='dIdt_fwdrevratio_filterR2.txt'
expstr='dIdt_fwdrevratio'
fomlabel='ratio of dI/dt fwd:rev sweeps'
fomshift=0.
fommult=1.
vmin=.8
vmax=1.25
cmap=cm.jet
aboverangecolstr='pink'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
binarylegloc=1
elkeys=['Ni', 'Fe', 'Co', 'Ce']
elif SYSTEM==90:
ellabels=['Fe', 'Zn', 'Sn', 'Ti']
os.chdir('C:/Users/Public/Documents/EchemDropRawData/FeSnZnTi/analysis')
rootstr='run01_CA0Iphoto0523.txt'
expstr='CA0Iphoto'
fomlabel='Photocurrent ($\mu$A)'
fomshift=0.
fommult=1000000.
vmin=-.04
vmax=.8
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
binarylegloc=1
elkeys=['A', 'B', 'C', 'D']
xshift=139.5
yshift=-18.29
allmeasurements=True
elif SYSTEM==91:
ellabels=['Fe', 'Zn', 'Sn', 'Ti']
os.chdir('C:/Users/Public/Documents/EchemDropRawData/FeSnZnTi/analysis')
rootstr='run04_CA0Iphoto0523.txt'
expstr='CA0Iphoto'
fomlabel='Photocurrent ($\mu$A)'
fomshift=0.
fommult=1000000.
vmin=-.1
vmax=1.3
cmap=cm.jet
aboverangecolstr='.3'
belowrangecolstr='k'
savefolder=os.path.join(os.getcwd(), expstr)
if not os.path.isdir(savefolder):
os.mkdir(savefolder)
binarylegloc=1
elkeys=['A', 'B', 'C', 'D']
xshift=139.5
yshift=-18.29
allmeasurements=True
dpl=['', '', '']
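# Locate the per-plate FOM text files (up to 3 plates) by matching rootstr and expstr in the walked filenames.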
for root, dirs, files in os.walk(os.getcwd()):
testfn=[fn for fn in files if (rootstr in fn) and (expstr in fn)]
#print testfn
for fn in testfn:
for count in range(3):
if ('late%d' %(count+1)) in fn:
dpl[count]=os.path.join(root, fn)
print 'FOM file paths:'
for dp in dpl:
print dp
dropdl=[]
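# Parse each tab-delimited FOM file into a dict of numpy arrays keyed by column name.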
for dp in dpl:
if dp=='':
dropdl+=[None]
continue
f=open(dp, mode='r')
dr=csv.DictReader(f, delimiter='\t')
dropd={}
for l in dr:
for kr in l.keys():
k=kr.strip()
if not k in dropd.keys():
dropd[k]=[]
dropd[k]+=[myeval(l[kr].strip())]
for k in dropd.keys():
dropd[k]=numpy.array(dropd[k])
f.close()
dropdl+=[dropd]
fig=pylab.figure(figsize=(9, 4.*len(dropdl)))
figquatall=[]
compsall=[]
fomall=[]
plateindall=[]
codeall=[]
for count, dropd in enumerate(dropdl):
if dropd is None:
continue
print dropd.keys()
#dropinds=numpy.arange(len(dropd['Sample']))
try:
dropd['compositions']=numpy.array([dropd[elkey] for elkey in elkeys]).T
except:
dropd['compositions']=numpy.array([dropd[elkey] for elkey in ['A', 'B', 'C', 'D']]).T
unroundcompositions(dropd)
addcodetoplatemapgen1dlist(dlist=None, dropd=dropd)
dropinds=numpy.argsort(dropd['Sample'])
dropinds=dropinds[numpy.logical_not(numpy.isnan(dropd[expstr][dropinds]))]
x=dropd['x(mm)'][dropinds]+xshift
y=dropd['y(mm)'][dropinds]+yshift
fom=(dropd[expstr][dropinds]+fomshift)*fommult
comp=dropd['compositions'][dropinds]
code=dropd['code'][dropinds]
compsall+=list(comp)
fomall+=list(fom)
plateindall+=[count]*len(fom)
codeall+=list(code)
comp=numpy.array([a/a.sum() for a in comp])
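    # Filter samples by platemap code: all measurements, every position except code 1, or only the standard code-0 library points.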
if allmeasurements:
codeinds=numpy.where(code>-1)
elif allposn:
codeinds=numpy.where(code!=1)
else:
codeinds=numpy.where(code==0)
comp=comp[codeinds]
fom=fom[codeinds]
x=x[codeinds]
y=y[codeinds]
clip=True
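    # Color out-of-range FOM values with the configured above/below-range colors and disable normalization clipping when either is set.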
for fcn, vstr in zip([cmap.set_over, cmap.set_under], [aboverangecolstr, belowrangecolstr]):
if len(vstr)==0:
continue
c=col_string(vstr)
fcn(c)
clip=False
norm=colors.Normalize(vmin=vmin, vmax=vmax, clip=clip)
print 'fom min, max, mean, std:', fom.min(), fom.max(), fom.mean(), fom.std()
if numpy.any(fom>vmax):
if numpy.any(fom<vmin):
extend='both'
else:
extend='max'
elif numpy.any(fom<vmin):
extend='min'
else:
extend='neither'
#ax=pylab.subplot(211)
#ax2=pylab.subplot(212)
#pylab.subplots_adjust(left=.03, right=.97, top=.97, bottom=.03, hspace=.01)
ax2=fig.add_subplot(len(dropdl), 1, count+1)
ax2.set_aspect(1)
mapbl=ax2.scatter(x, y, c=fom, s=60, marker='s', edgecolors='none', cmap=cmap, norm=norm)
ax2.set_xlim(x.min()-2, x.max()+2)
ax2.set_ylim(y.min()-2, y.max()+2)
ax2.set_title('plate %d' %(count+1))
#pylab.title('CP1Ess (V) Map')
figquat=pylab.figure(figsize=(8, 8))
stp = QuaternaryPlot(111, minlist=[0., 0., 0., 0.], ellabels=ellabels)
stp.scatter(comp, c=fom, s=pointsize, edgecolors='none', cmap=cmap, norm=norm)
stp.label(ha='center', va='center', fontsize=20)
stp.set_projection(azim=view_azim, elev=view_elev)
caxquat=figquat.add_axes((.83, .3, .04, .4))
cb=pylab.colorbar(stp.mappable, cax=caxquat, extend=extend)
cb.set_label(fomlabel, fontsize=16)
stp.ax.set_title('plate %d' %(count+1))
figquatall+=[figquat]
compsall=numpy.array(compsall)
fomall=numpy.array(fomall)
plateindall=numpy.array(plateindall)
codeall=numpy.array(codeall)
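# Platemap code groupings: code 0 samples feed the composition plots, code 2 (pure-element) samples feed the per-plate comparison below.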
code0inds=numpy.where(codeall==0)
code02inds=numpy.where(codeall!=1)
code2inds=numpy.where(codeall==2)
if not permuteelements is None:
ellabels=[ellabels[i] for i in permuteelements]
compsall=compsall[:, permuteelements]
#fomall[fomall<0.3]=0.
if numpy.any(fomall>vmax):
if numpy.any(fomall<vmin):
extend='both'
else:
extend='max'
elif numpy.any(fomall<vmin):
extend='min'
else:
extend='neither'
fig.subplots_adjust(left=.05, bottom=.03, top=.96, right=.83, hspace=.14)
cax=fig.add_axes((.85, .3, .04, .4))
cb=pylab.colorbar(mapbl, cax=cax, extend=extend)
cb.set_label(fomlabel, fontsize=20)
axl, stpl=make10ternaxes(ellabels=ellabels)
pylab.figure(figsize=(8, 8))
stpquat=QuaternaryPlot(111, ellabels=ellabels)
#stpquat.scatter(compsall[code0inds], c=fomall[code0inds], s=20, edgecolors='none', cmap=cmap, norm=norm)
cols=stpquat.scalarmap(fomall[code0inds], norm, cmap)
stpquat.plotbycolor(compsall[code0inds], cols, marker='o', markersize=5, alpha=.3)#, markeredgecolor=None
scatter_10axes(compsall[code0inds], fomall[code0inds], stpl, s=18, edgecolors='none', cmap=cmap, norm=norm)
if labelquat:
stpquat.label(fontsize=20)
stpquat.set_projection(azim=view_azim, elev=view_elev)
axl30, stpl30=make30ternaxes(ellabels=ellabels)
scatter_30axes(compsall[code0inds], fomall[code0inds], stpl30, s=18, edgecolors='none', cmap=cmap, norm=norm)
axl_tern, stpl_tern=make4ternaxes(ellabels=ellabels)
scatter_4axes(compsall[code0inds], fomall[code0inds], stpl_tern, s=20, edgecolors='none', cmap=cmap, norm=norm)
axbin, axbininset=plotbinarylines_axandinset(linewidth=2, ellabels=ellabels)
plotbinarylines_quat(axbin, compsall[code0inds], fomall[code0inds], markersize=8, legloc=binarylegloc, ellabels=ellabels)
axbin.set_xlabel('binary composition', fontsize=16)
axbin.set_ylabel(fomlabel, fontsize=16)
figtemp=pylab.figure(stpquat.ax.figure.number)
cbax=figtemp.add_axes((.83, .3, .04, .4))
cb=pylab.colorbar(stpquat.mappable, cax=cbax, extend=extend)
cb.set_label(fomlabel, fontsize=16)
figtemp=pylab.figure(axl[0].figure.number)
cbax=figtemp.add_axes((.85, .3, .04, .4))
cb=pylab.colorbar(stpquat.mappable, cax=cbax, extend=extend)
cb.set_label(fomlabel, fontsize=16)
figtemp=pylab.figure(axl30[0].figure.number)
cbax=figtemp.add_axes((.91, .3, .03, .4))
cb=pylab.colorbar(stpquat.mappable, cax=cbax, extend=extend)
cb.set_label(fomlabel, fontsize=18)
figtemp=pylab.figure(axl_tern[0].figure.number)
cbax=figtemp.add_axes((.9, .3, .03, .4))
cb=pylab.colorbar(stpquat.mappable, cax=cbax, extend=extend)
cb.set_label(fomlabel, fontsize=16)
purelfig=pylab.figure()
linestyle=['-', '--', '-.', ':']
compsel=compsall[code2inds]
plateindel=plateindall[code2inds]
fomel=fomall[code2inds]
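# Plot pure-element FOMs versus plate number; color indicates the element (CMYK order) and the marker digit gives the thickness index.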
for count, col in enumerate(['c', 'm', 'y', 'k']):
c_el=compsel[:, count]
inds=numpy.where(c_el>0.)
c_el=c_el[inds]
cvl=list(set(c_el))
cvl.sort()
fom_el=fomel[inds]
plate_inds=plateindel[inds]
platefom_thick=[(plate_inds[c_el==cv], fom_el[c_el==cv]) for cv in cvl]
for thickcount, ((plate, fom_plate), ls) in enumerate(zip(platefom_thick, linestyle)):
indsp=numpy.argsort(plate)
plate=plate[indsp]+1
fom_plate=fom_plate[indsp]
if count==3 or thickcount==0:
pylab.plot(plate, fom_plate, col+ls, marker=r'$%d$' %(thickcount+1),markersize=13, label='%s,thick. %d' %(ellabels[count], thickcount+1))
# elif thickcount==0:
# pylab.plot([1, 2, 3], fom_plate, col+ls, marker=r'$%d$' %(thickcount+1),markersize=13, label='%s' %(ellabels[count],) )
else:
pylab.plot(plate, fom_plate, col+ls, marker=r'$%d$' %(thickcount+1),markersize=13)
pylab.xlim(.7, 4.3)
pylab.xlabel('plate number', fontsize=16)
pylab.ylabel(fomlabel, fontsize=16)
pylab.title('PURE ELEMENTS. color=element(CMYK). #=thickness', fontsize=18)
pylab.legend(loc=1)
if 1:
quatxfig=pylab.figure()
quatx=QuaternaryPlot(111, ellabels=ellabels, offset=0)
cbax=quatxfig.add_axes((.83, .3, .04, .4))
quatx.label()
compverts=[[.5,.5, 0, 0], [.5, 0, .5, 0], [0, 0, .1, .9]]
calctype=1
critdist=.05
betweenbool=1
invertbool=0
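    # calctype 0 keeps compositions within critdist of the line through the first two vertices; calctype 1 keeps those within critdist of the plane through all three.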
if calctype==0:
selectinds, distfromlin, lineparameter=quatx.filterbydistancefromline(compsall[code0inds], compverts[0], compverts[1], critdist, betweenpoints=betweenbool, invlogic=invertbool, returnall=True)
lineparameter=lineparameter[selectinds]
elif calctype==1:
selectinds, distfromplane, xyparr, xyp_verts,intriangle=quatx.filterbydistancefromplane(compsall[code0inds], compverts[0], compverts[1], compverts[2], critdist, withintriangle=betweenbool, invlogic=invertbool, returnall=True)
xyparr=xyparr[selectinds]
fomselectx=fomall[code0inds][selectinds]
compsselectx=compsall[code0inds][selectinds]
xsecfig=pylab.figure()
xsecax=pylab.subplot(111)
if calctype==0:
quatx.line(compverts[0], compverts[1])
quatx.plotfomalonglineparameter(xsecax, lineparameter, fomselectx, compend1=compverts[0], compend2=compverts[1], lineparticks=numpy.linspace(0, 1, 4), ls='none', marker='.')
elif calctype==1:
quatx.plotfominselectedplane(xsecax, xyparr, fomselectx, xyp_verts=xyp_verts, vertcomps_labels=[compverts[0], compverts[1], compverts[2]], s=20, edgecolor='none', cmap=cmap, norm=norm)
quatx.line(compverts[0], compverts[1])
quatx.line(compverts[0], compverts[2])
quatx.line(compverts[2], compverts[1])
quatx.scatter(compsselectx, c=fomselectx, s=20, cmap=cmap, norm=norm, edgecolor='none')#vmin=vmin, vmax=vmax,
cb=quatxfig.colorbar(quatx.mappable, cax=cbax, extend=extend, cmap=cmap, norm=norm)
cb.set_label(fomlabel, fontsize=18)
quatx.set_projection(azim=view_azim, elev=view_elev)
if SYSTEM==1:
axbin.set_ylim(.23, .7)
if SYSTEM==6:
axbin.set_ylim(.38, .5)
if not os.path.exists(savefolder):
os.mkdir(savefolder)
os.chdir(savefolder)
if 1:
pylab.figure(fig.number)
pylab.savefig('%s_PlatesAll_Posn.png' %expstr)
for count, fg in enumerate(figquatall):
pylab.figure(fg.number)
pylab.savefig('%s_Plate%d_Quat.png' %(expstr, count+1))
pylab.figure(stpquat.ax.figure.number)
pylab.savefig('%s_PlatesAll_Quat.png' %expstr)
pylab.savefig('%s_PlatesAll_Quat.png' %expstr, dpi=600)
pylab.figure(axl[0].figure.number)
pylab.savefig('%s_stackedtern.png' %expstr)
pylab.figure(axl30[0].figure.number)
pylab.savefig('%s_stackedtern30.png' %expstr)
pylab.figure(axl_tern[0].figure.number)
pylab.savefig('%s_ternfaces.png' %expstr)
pylab.figure(axbin.figure.number)
pylab.savefig('%s_binaries.png' %expstr)
pylab.figure(purelfig.number)
pylab.savefig('%s_pureelements.png' %expstr)
if 0:
os.chdir(savefolder)
pylab.figure(fig.number)
pylab.savefig('%s_PlatesAll_Posn.eps' %expstr)
for count, fg in enumerate(figquatall):
pylab.figure(fg.number)
pylab.savefig('%s_Plate%d_Quat.eps' %(expstr, count+1))
pylab.figure(stpquat.ax.figure.number)
pylab.savefig('%s_PlatesAll_Quat.eps' %expstr)
#pylab.savefig('%s_PlatesAll_Quat.svg' %expstr)
pylab.figure(axl[0].figure.number)
pylab.savefig('%s_stackedtern.eps' %expstr)
pylab.figure(axl_tern[0].figure.number)
pylab.savefig('%s_ternfaces.eps' %expstr)
pylab.figure(axbin.figure.number)
pylab.savefig('%s_binaries.eps' %expstr)
pylab.figure(purelfig.number)
pylab.savefig('%s_pureelements.eps' %expstr)
if 0:
pylab.figure(stpquat.ax.figure.number)
pylab.savefig('%s_PlatesAll_Quat_hires.png' %expstr, dpi=600)
pylab.figure(axl[0].figure.number)
pylab.savefig('%s_stackedtern_hires.png' %expstr, dpi=600)
pylab.show()
| bsd-3-clause |
starsriver/ML | 3/decision tree.py | 1 | 1121 | # -*- coding: utf-8 -*-
from sklearn.feature_extraction import DictVectorizer
import csv
from sklearn import preprocessing
from sklearn import tree
allElectronicsData = open(r"./ALLElectronics.csv", "rb")
reader = csv.reader(allElectronicsData)
headers = next(reader)
# print(headers)
featrueList = []
labelList = []
for row in reader:
labelList.append(row[len(row) - 1])
rowDict = {}
for i in range(1, len(row) - 1):
rowDict[headers[i]] = row[i]
featrueList.append(rowDict)
# print(featrueList)
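# The dict-of-categoricals in featrueList is one-hot encoded by DictVectorizer
# below, and LabelBinarizer maps the class labels to 0/1, so the tree is fit
# on purely numeric arrays.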
vec = DictVectorizer()
dummyX = vec.fit_transform(featrueList).toarray()
lb = preprocessing.LabelBinarizer()
dummyY = lb.fit_transform(labelList)
clf = tree.DecisionTreeClassifier(criterion="entropy")
clf = clf.fit(dummyX, dummyY)
with open(r"./ALLElectronics.dot", "w") as f:
f = tree.export_graphviz(
clf, feature_names=vec.get_feature_names(), out_file=f)
oneRowX = dummyX[0, :]
newRowX = oneRowX.copy() # work on a copy so the training matrix is not modified
newRowX[0] = 1
newRowX[2] = 0
# predict expects a 2-D array of samples, hence the extra brackets
predictedY = clf.predict([newRowX])
print(str(clf.predict([[0, 0, 1, 0, 1, 1, 0, 0, 1, 0]])))
print(str(predictedY))
# dot -Tpdf dotfile -o out.pdf
| bsd-2-clause |
gijs/solpy | solpy/thermal.py | 2 | 9875 | #!/usr/bin/env python
# Copyright (C) 2013 Daniel Thomas
#
# This program is free software. See terms in LICENSE file.
#pylint: skip-file
import numpy as np
from numpy import sin, cos, tan, arcsin, arccos, arctan, pi
from collectors import *
from datetime import datetime, timedelta, date
class location():
''' This should be merged with the pv location class...'''
def __init__(self, name='default',lat=0,lon=0,alt=0):
self.name = name
self.lat = lat # Latitude [degrees]
self.alt = alt # Altitude [m]
self.lon = lon # Longitude [degrees]
self.lon_s = round(lon/15)*15 # Standard longitude [degrees] # UNUSED??
self.phi = self.lat*pi/180.0 # Latitude, [radians]
class solar_day():
'''solar_day uses Duffie & Beckman solar position algorithm for 1 day's solar angles.
Day constants are floats, variables are numpy arrays.'''
def __init__(self, n, L, C, time=None):
# Constants:
self.n = n # Day of year
self.L = L # Location object
self.C = C # Collector object
self.delta = (23.45*pi/180) * sin(2*pi*(284+self.n)/365) # Declination (1.6.1)
# Variables: all numpy arrays of length 24h * 60min = 1440
if time is None: self.time = np.array([datetime(2000,1,1)+timedelta(days=n-1,minutes=m) for m in range(1440)])
else: self.time = time
self.omega = np.array([(t.hour+(t.minute/60.0)-12)*15.0*(pi/180.0) for t in self.time]) # Hour angle (15' per hour, a.m. -ve)
self.gamma_s = np.array([azimuth(self.L.phi,self.delta,m) for m in self.omega]) # Solar azimuth
self.theta_z = arccos( cos(self.L.phi)*cos(self.delta)*cos(self.omega) +
sin(self.L.phi)*sin(self.delta) ) # Zenith angle (1.6.5)
self.theta = arccos( cos(self.theta_z)*cos(self.C.beta) + # Angle of incidence on collector (1.6.3)
sin(self.theta_z)*sin(self.C.beta)*cos(self.gamma_s-self.C.gamma) )
self.R_b = cos(np.clip(self.theta,0,pi/2))/cos(np.clip(self.theta_z,0,1.55)) # (1.8.1) Ratio of radiation on sloped/horizontal surface
G_sc = 1367.0 # Solar constant [W/m2] (p.6)
G_on = G_sc * (1+0.033*cos(2.0*pi*n/365)) # Extraterrestrial radiation, normal plane [W/m2] (1.4.1)
self.G_o = G_on * cos(self.theta_z) # Extraterrestrial radiation, horizontal [W/m2] (1.10.1)
def clear_sky(self):
return np.array([clear_sky(self.n,t,self.L.alt) for t in self.theta_z])
def G_T_HDKR(self,G,G_d):
'''Calculates total radiation on a tilted plane, using the HDKR model'''
G_b = np.clip(G - G_d,0,1500)
A_i = G_b/self.G_o # Anisotropy index
np.seterr(divide='ignore',invalid='ignore') # Avoiding errors in divide by inf. below
f = np.sqrt(np.nan_to_num(G_b/G)) # Horizon brightening factor
np.seterr(divide='warn',invalid='warn')
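# HDKR tilted-surface model: beam plus circumsolar diffuse (scaled by R_b),
# isotropic diffuse with a horizon-brightening correction, and a
# ground-reflected term (see D&B for the full derivation).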
G_T = ((G_b + G_d*A_i)*self.R_b + \
G_d * (1-A_i) * ((1+cos(self.C.beta))/2) * (1 + f*sin(self.C.beta/2)**3) + \
G*self.C.rho_g*((1-cos(self.C.beta))/2)) # Ground reflectance
return G_T
def R_b(self):
pass
def day_of_year(dtime):
'''returns n, the day of year, for a given python datetime object'''
if isinstance(dtime,int):
ndate = datetime.now().replace(month=1,day=1)+timedelta(days=dtime-1)
return ndate.date()
elif isinstance(dtime,datetime) or isinstance(dtime,date):
n = (dtime - dtime.replace(month=1,day=1)).days + 1
return n
else:
print('ERROR: day_of_year takes an int (n) or datetime.datetime object')
return ' '
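# Example: day_of_year(datetime(2013, 3, 1)) returns 60, while day_of_year(60)
# returns the corresponding date object in the current year.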
def solar_time(n):
'''solar_time: returns E, for solar/local time offset.'''
B = ((n-1) * (360.0/365))*(pi/180.0) # See D&B p.11
E = 229.2 * (0.000075 + 0.001868*cos(B) - 0.032077*sin(B) - 0.014615*cos(2*B) - 0.04089*sin(2*B))
return E
def azimuth(phi,delta,omega):
''' Solar azimuth angle (from D&B eq. 1.6.6)
(Angle from South of the projection of beam radiation on the horizontal plane, W = +ve)
phi - Latitude (radians)
delta - Declination (radians)
omega - Hour angle (radians) '''
omega_ew = arccos(tan(delta)/tan(phi)) # E-W hour angle (1.6.6g)
if (abs(omega) < omega_ew): C_1 = 1
else: C_1 = -1
if (phi*(phi-delta) >= 0): C_2 = 1
else: C_2 = -1
if (omega >= 0): C_3 = 1
else: C_3 = -1
#gamma_sp = arctan( sin(omega) / (sin(phi)*cos(omega)-cos(phi)*tan(delta)) ) # Gives error!
theta_z = arccos( cos(phi)*cos(delta)*cos(omega) + sin(phi)*sin(delta) )
gamma_sp = arcsin( sin(omega)*cos(delta)/sin(theta_z) )
gamma_s = C_1*C_2*gamma_sp + C_3*((1.0-C_1*C_2)/2.0)*pi
return gamma_s
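# Example (hypothetical values): for a site at 40 deg N (phi ~ 0.70 rad) near
# the June solstice (delta ~ 0.41 rad), two hours after solar noon
# (omega ~ 0.52 rad), azimuth(0.70, 0.41, 0.52) gives roughly 1.1 rad,
# i.e. the sun is well west of south (west is positive in this convention).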
def clear_sky(n, theta_z, alt):
'''clear_sky: returns an estimate of the clear sky radiation for a given day, time.
output: G_c = [G_ct, G_cb, G_cd] '''
A = alt/1000.0 # Altitude [km]
r_0 = 0.95 # r_0 - r_k are correction factors from Table 2.8.1 (p.73)
r_1 = 0.98 # (for tropical climate)
r_k = 1.02
a_0 = r_0 * (0.4237 - 0.00821*((6.0-A)**2))
a_1 = r_1 * (0.5055 + 0.00595*((6.5-A)**2))
k = r_k * (0.2711 + 0.01858*((2.5-A)**2))
tau_b = a_0 + a_1*np.exp(-k/cos(theta_z)) # Transmission coeff. for beam (2.8.1), from Hottel
tau_d = 0.271 - 0.294*tau_b # Transmission coeff. for diffuse (2.8.5), Liu & Jordan
G_sc = 1367.0 # Solar constant [W/m2] (p.6)
G_on = G_sc * (1+0.033*cos(2.0*pi*n/365)) # Extraterrestial radiation, normal plane [W/m2] (1.4.1)
G_o = G_on * cos(theta_z) # Extraterrestial radiation, horizontal [W/m2] (1.10.1)
G_c = [0.0, 0.0, 0.0]
if (G_o > 0.0): G_c = [(tau_b+tau_d)*G_o, tau_b*G_o, tau_d*G_o]
return G_c
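# Example (sketch): clear-sky estimate for day n=172 at 1500 m altitude and a
# zenith angle of 0.5 rad:
# G_ct, G_cb, G_cd = clear_sky(172, 0.5, 1500.0)
# The returned list holds total, beam and diffuse clear-sky irradiance on a
# horizontal plane in W/m2 (all zeros when the sun is below the horizon).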
def Intercepted_Tang(S,I_nb,I_nd):
''' intercepted: Calculations for radiation intercepted by glass tube collector,
based on Tang, 2009
NB: Deviations from Tang's notation (to conform with the D&B notation used elsewhere):
lambda -> phi; phi -> gamma'''
# Collector Constants
D1 = S.C.c.t.D1
D2 = S.C.c.t.D2
B = S.C.c.B
s = S.C.S
# Solar position vectors
n_x = cos(S.delta)*cos(S.L.phi)*cos(S.omega) + sin(S.delta)*sin(S.L.phi)
n_y = -cos(S.delta)*sin(S.omega)
n_z = -cos(S.delta)*sin(S.L.phi)*cos(S.omega) + sin(S.delta)*cos(S.L.phi)
np_x = n_x*cos(S.C.beta) - (n_y*sin(S.C.gamma)+n_z*cos(S.C.gamma))*sin(S.C.beta)
np_y = n_y*cos(S.C.gamma) - n_z*sin(S.C.gamma)
np_z = n_x*sin(S.C.beta) + (n_y*sin(S.C.gamma)+n_z*cos(S.C.gamma))*cos(S.C.beta)
# Beam radiation, directly intercepted
Omega = {'T': arctan(abs(np_y/np_x)),
'H': arctan(abs(np_z/np_x)) }[S.C.c.type]
Omega_0 = arccos((D1+D2)/(2*B))
Omega_1 = arccos((D1-D2)/(2*B))
# f(Omega): added np_x condition to avoid after-sunset values, np.clip to cut -ve values
fOmega = np.zeros_like(np_x)
for i in range(len(Omega)):
if np_x[i] <= 0.0: fOmega[i] = 0.0
elif Omega[i] <= Omega_0: fOmega[i] = 1.0
elif Omega[i] >= Omega_1: fOmega[i] = 0.0
else: fOmega[i] = np.clip((B/D1)*cos(Omega[i]) + 0.5*(1-(D2/D1)),0,2)
cosOt = { 'T': np.sqrt(np_x*np_x + np_y*np_y),
'H': np.sqrt(np_x*np_x + np_z*np_z) }[S.C.c.type]
I_bt = D1 * cosOt * fOmega * I_nb
# Diffuse radiation, directly intercepted
if S.C.c.type == 'T':
piF = Omega_0 + 0.5*(1-(D2/D1))*(Omega_1-Omega_0) + (B/D1)*(sin(Omega_1)-sin(Omega_0))
I_dB = 0.5 * (1+cos(S.C.beta)) * I_nd
elif S.C.c.type == 'H':
if ((pi/2)-S.C.beta) >= Omega_0: piF = Omega_0 + (1-(D2/D1))*((pi/2) - S.C.beta + Omega_1 - 2*Omega_0)/4 + \
(B/(2*D1))*(sin(Omega_1)+cos(S.C.beta)-2*sin(Omega_0))
else: piF = 0.5*(Omega_0+(pi/2)-S.C.beta) + (1-(D2/D1))*(Omega_1 - Omega_0)/4 + \
(B/(2*D1))*(sin(Omega_1) - sin(Omega_0))
I_dB = I_nd
I_dt = D1 * piF * I_dB
# Beam radiation, reflected from DFR
W = B - (D2/cos(Omega)) # Eq. 18
C = 2*s/D2
dx = s * tan(Omega) - D2/(2*cos(Omega)) # (Eq. 23)
# # Debugging plot:
# import matplotlib.pyplot as plt
# import matplotlib.dates as mdates
# fig = plt.figure(1, figsize=(14,8))
# ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
# t = S.time[1]
# ax.plot(S.time,I_bt,'red', label=r'$I_{bt}$',linewidth=2)
# ax.plot(S.time,I_dt,'orange',label=r'$I_{dt}$',linewidth=2)
# ax.plot(S.time,np_x,'blue', label=r'$n^{\prime}_x$',linewidth=1)
# ax.plot(S.time,fOmega,'green', label=r'$f(\Omega)$',linewidth=2)
# ax.plot(S.time,Omega,'black', label=r'$\Omega$',linewidth=1)
# ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
# ax.set_xlim((t.replace(hour=6,minute=0),t.replace(hour=20,minute=0)))
# ax.legend()
# ax.set_ylim((-1,5))
# plt.show()
# Total intercepted radiation for entire collector array
I_total = S.C.N_tubes*S.C.c.t.L_ab * (I_bt + I_dt)
return I_total
def F(y):
C = 2*s/D2
dx = s * tan(Omega) - D2/(2*cos(Omega)) # (Eq. 23)
A1 = (2*(dx - y))/D2
A2 = (2*(B + dx - y))/D2
theta1 = arctan((A1*C + (A1**2 * C**2 - (A1**2 - 1)*(C**2 - 1))**0.5)/(C**2 - 1))
theta2 = arctan((-A2*C +(A2**2 * C**2 - (1 - A2**2)*(1 - C**2))**0.5)/(1 - C**2))
| lgpl-2.1 |
matousc89/padasip | padasip/filters/ap.py | 1 | 8992 | """
.. versionadded:: 0.4
.. versionchanged:: 1.0.0
The Affine Projection (AP) algorithm is implemented according to paper
:cite:`gonzalez2012affine`. Usage of this filter should be beneficial especially
when input data is highly correlated.
This filter is based on LMS. The difference is
that AP uses multiple input vectors in every sample.
The number of vectors is called projection order.
In this implementation the historic input vectors from input matrix are used
as the additional input vectors in every sample.
The AP filter can be created as follows
>>> import padasip as pa
>>> pa.filters.FilterAP(n)
where `n` is the size of the filter.
Content of this page:
.. contents::
:local:
:depth: 1
.. seealso:: :ref:`filters`
Algorithm Explanation
======================================
The input for AP filter is created as follows
:math:`\\textbf{X}_{AP}(k) = (\\textbf{x}(k), ..., \\textbf{x}(k-L))`,
where :math:`\\textbf{X}_{AP}` is the filter input, :math:`L` is the projection order,
:math:`k` is the discrete time index and :math:`\\textbf{x}(k)` is the input vector.
The output of the filter is calculated as follows:
:math:`\\textbf{y}_{AP}(k) = \\textbf{X}^{T}_{AP}(k) \\textbf{w}(k)`,
where :math:`\\textbf{w}(k)` is the vector of filter adaptive parameters.
The vector of targets is constructed as follows
:math:`\\textbf{d}_{AP}(k) = (d(k), ..., d(k-L))^T`,
where :math:`d(k)` is target in time :math:`k`.
The error of the filter is estimated as
:math:`\\textbf{e}_{AP}(k) = \\textbf{d}_{AP}(k) - \\textbf{y}_{AP}(k)`.
And the adaptation of adaptive parameters is calculated according to equation
:math:`\\textbf{w}_{AP}(k+1) =
\\textbf{w}_{AP}(k) + \mu \\textbf{X}_{AP}(k) (\\textbf{X}_{AP}^{T}(k)
\\textbf{X}_{AP}(k) + \epsilon \\textbf{I})^{-1} \\textbf{e}_{AP}(k)`.
During the filtering we are interested just in output of filter :math:`y(k)`
and the error :math:`e(k)`. These two values are the first elements in
vectors: :math:`\\textbf{y}_{AP}(k)` for output and
:math:`\\textbf{e}_{AP}(k)` for error.
Minimal Working Example
======================================
If you have measured data you may filter it as follows
.. code-block:: python
import numpy as np
import matplotlib.pylab as plt
import padasip as pa
# creation of data
N = 500
x = np.random.normal(0, 1, (N, 4)) # input matrix
v = np.random.normal(0, 0.1, N) # noise
d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v # target
# identification
f = pa.filters.FilterAP(n=4, order=5, mu=0.5, eps=0.001, w="random")
y, e, w = f.run(d, x)
# show results
plt.figure(figsize=(15,9))
plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
plt.plot(d,"b", label="d - target")
plt.plot(y,"g", label="y - output");plt.legend()
plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
plt.tight_layout()
plt.show()
An example how to filter data measured in real-time
.. code-block:: python
import numpy as np
import matplotlib.pylab as plt
import padasip as pa
# these two functions simulate your online measurement
def measure_x():
# it produces input vector of size 3
x = np.random.random(3)
return x
def measure_d(x):
# measure system output
d = 2*x[0] + 1*x[1] - 1.5*x[2]
return d
N = 100
log_d = np.zeros(N)
log_y = np.zeros(N)
filt = pa.filters.FilterAP(3, mu=1.)
for k in range(N):
# measure input
x = measure_x()
# predict new value
y = filt.predict(x)
# do the important stuff with prediction output
pass
# measure output
d = measure_d(x)
# update filter
filt.adapt(d, x)
# log values
log_d[k] = d
log_y[k] = y
### show results
plt.figure(figsize=(15,9))
plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
plt.plot(log_d,"b", label="d - target")
plt.plot(log_y,"g", label="y - output");plt.legend()
plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
plt.plot(10*np.log10((log_d-log_y)**2),"r", label="e - error [dB]")
plt.legend(); plt.tight_layout(); plt.show()
References
======================================
.. bibliography:: ap.bib
:style: plain
Code Explanation
======================================
"""
import numpy as np
from padasip.filters.base_filter import AdaptiveFilter
class FilterAP(AdaptiveFilter):
"""
Adaptive AP filter.
**Args:**
* `n` : length of filter (integer) - how many inputs are in one input array
(row of input matrix)
**Kwargs:**
* `order` : projection order (integer) - how many input vectors
are in one input matrix
* `mu` : learning rate (float). Also known as step size.
If it is too small,
the filter may have bad performance. If it is too high,
the filter will be unstable. The default value can be unstable
for ill-conditioned input data.
* `eps` : initial offset covariance (float)
* `w` : initial weights of filter. Possible values are:
* array with initial weights (1 dimensional array) of filter size
* "random" : create random weights
* "zeros" : create zero value weights
"""
def __init__(self, n, order=5, mu=0.1, eps=0.001, w="random"):
self.kind = "AP filter"
self.n = self.check_int(
n,'The size of filter must be an integer')
self.order = self.check_int(
order, 'The order of projection must be an integer')
self.mu = self.check_float_param(mu, 0, 1000, "mu")
self.eps = self.check_float_param(eps, 0, 1000, "eps")
self.init_weights(w, self.n)
self.w_history = False
self.x_mem = np.zeros((self.n, self.order))
self.d_mem = np.zeros(order)
self.ide_eps = self.eps * np.identity(self.order)
self.ide = np.identity(self.order)
self.y_mem = False
self.e_mem = False
def adapt(self, d, x):
"""
Adapt weights according to one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array)
"""
# create input matrix and target vector
self.x_mem[:,1:] = self.x_mem[:,:-1]
self.x_mem[:,0] = x
self.d_mem[1:] = self.d_mem[:-1]
self.d_mem[0] = d
# estimate output and error
self.y_mem = np.dot(self.x_mem.T, self.w)
self.e_mem = self.d_mem - self.y_mem
# update
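# dw = X (X^T X + eps*I)^{-1} e: the regularised update from the docstring.
# The inverse is obtained by solving against the identity matrix
# (np.linalg.solve) instead of calling np.linalg.inv directly.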
dw_part1 = np.dot(self.x_mem.T, self.x_mem) + self.ide_eps
dw_part2 = np.linalg.solve(dw_part1, self.ide)
dw = np.dot(self.x_mem, np.dot(dw_part2, self.e_mem))
self.w += self.mu * dw
def run(self, d, x):
"""
This function filters multiple samples in a row.
**Args:**
* `d` : desired value (1 dimensional array)
* `x` : input matrix (2-dimensional array). Rows are samples,
columns are input arrays.
**Returns:**
* `y` : output value (1 dimensional array).
The size corresponds with the desired value.
* `e` : filter error for every sample (1 dimensional array).
The size corresponds with the desired value.
* `w` : history of all weights (2 dimensional array).
Every row is set of the weights for given sample.
"""
# measure the data and check that the dimensions agree
N = len(x)
if not len(d) == N:
raise ValueError('The length of vector d and matrix x must agree.')
self.n = len(x[0])
# prepare data
try:
x = np.array(x)
d = np.array(d)
except:
raise ValueError('Impossible to convert x or d to a numpy array')
# create empty arrays
y = np.zeros(N)
e = np.zeros(N)
self.w_history = np.zeros((N,self.n))
# adaptation loop
for k in range(N):
self.w_history[k,:] = self.w
# create input matrix and target vector
self.x_mem[:,1:] = self.x_mem[:,:-1]
self.x_mem[:,0] = x[k]
self.d_mem[1:] = self.d_mem[:-1]
self.d_mem[0] = d[k]
# estimate output and error
self.y_mem = np.dot(self.x_mem.T, self.w)
self.e_mem = self.d_mem - self.y_mem
y[k] = self.y_mem[0]
e[k] = self.e_mem[0]
# update
dw_part1 = np.dot(self.x_mem.T, self.x_mem) + self.ide_eps
dw_part2 = np.linalg.solve(dw_part1, self.ide)
dw = np.dot(self.x_mem, np.dot(dw_part2, self.e_mem))
self.w += self.mu * dw
return y, e, self.w_history
| mit |
klingebj/regreg | doc/source/conf.py | 1 | 7753 | # emacs: -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# sampledoc documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 3 12:40:24 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
# We load the nipy release info into a dict by explicit execution
rel = {}
execfile('../../code/regreg/info.py', rel)
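# Note: execfile() is Python 2 only; under Python 3 the equivalent would be
# exec(open('../../code/regreg/info.py').read(), rel).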
# Import support for ipython console session syntax highlighting (lives
# in the sphinxext directory defined above)
import ipython_console_highlighting
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.pngmath',
'sphinx.ext.autosummary',
'ipython_console_highlighting',
'ipython_directive',
'inheritance_diagram',
'math_dollar'
]
# Current version (as of 11/2010) of numpydoc is only compatible with sphinx >
# 1.0. We keep copies of this version in 'numpy_ext'. For a while we will also
# keep a copy of the older numpydoc version to allow compatibility with sphinx
# 0.6
try:
# With older versions of sphinx, this causes a crash
import numpy_ext.numpydoc
except ImportError:
# Older version of sphinx
extensions.append('numpy_ext_old.numpydoc')
else: # probably sphinx >= 1.0
extensions.append('numpy_ext.numpydoc')
autosummary_generate=True
# Matplotlib sphinx extensions
# ----------------------------
# Currently we depend on some matplotlib extensions that are only in
# the trunk, so we've added copies of these files to fall back on,
# since most people install releases. Once these extensions have
# been released for a while we should remove this hack. I'm assuming
# any modifications to these extensions will be done upstream in
# matplotlib! The matplotlib trunk will have more bug fixes and
# feature updates so we'll try to use that one first.
from matplotlib import rc
rc('text', usetex=True)
try:
import matplotlib.sphinxext
extensions.append('matplotlib.sphinxext.only_directives')
extensions.append('matplotlib.sphinxext.plot_directive')
except ImportError:
extensions.append('only_directives')
extensions.append('plot_directive')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'regreg'
copyright = '2011, B. Klingenberg & J. Taylor'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = rel['__version__']
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = []
# List of directories, relative to source directories, that shouldn't
# be searched for source files.
# exclude_trees = []
# what to put into API doc (just class doc, just init, or both)
autoclass_content = 'class'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
#
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'regreg.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'RegReg Documentation'
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = project
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('documentation', 'regreg.tex', 'RegReg Documentation',
ur'B. Klingenberg & J. Taylor.','manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# Additional stuff for the LaTeX preamble.
latex_preamble = """
\usepackage{amsmath}
\usepackage{amssymb}
\newcommand{\real}{\mathbb{R}}
% Uncomment these two if needed
%\usepackage{amsfonts}
%\usepackage{txfonts}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = True
| bsd-3-clause |
nelson-liu/scikit-learn | sklearn/neighbors/classification.py | 27 | 14358 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable, optional (default = 'uniform')
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, ValueError is raised, when outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights[inliers])],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
yarikoptic/pystatsmodels | statsmodels/datasets/scotland/data.py | 3 | 2910 | """Taxation Powers Vote for the Scottish Parliament 1997 dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = "Taxation Powers Vote for the Scottish Parliament 1997"
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Taxation Powers' Yes Vote for the Scottish Parliament - 1997"""
DESCRLONG = """
This data is based on the example in Gill and describes the proportion of
voters who voted Yes to grant the Scottish Parliament taxation powers.
The data are divided into 32 council districts. This example's explanatory
variables include the amount of council tax collected in pounds sterling as
of April 1997 per two adults before adjustments, the female percentage of
total claims for unemployment benefits as of January, 1998, the standardized
mortality rate (UK is 100), the percentage of labor force participation,
regional GDP, the percentage of children aged 5 to 15, and an interaction term
between female unemployment and the council tax.
The original source files and variable information are included in
/scotland/src/
"""
NOTE = """
Number of Observations - 32 (1 for each Scottish district)
Number of Variables - 8
Variable name definitions::
YES - Proportion voting yes to granting taxation powers to the Scottish
parliament.
COUTAX - Amount of council tax collected in pounds sterling as of April '97
UNEMPF - Female percentage of total unemployment benefits claims as of
January 1998
MOR - The standardized mortality rate (UK is 100)
ACT - Labor force participation (Short for active)
GDP - GDP per county
AGE - Percentage of children aged 5 to 15 in the county
COUTAX_FEMALEUNEMP - Interaction between COUTAX and UNEMPF
Council district names are included in the data file, though are not returned
by load.
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the Scotvote data and returns a Dataset instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Load the Scotvote data and returns a Dataset instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = np.recfromtxt(open(filepath + '/scotvote.csv',"rb"), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6,7,8))
return data
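# Usage sketch (assuming a standard statsmodels install):
# from statsmodels.datasets import scotland
# data = scotland.load_pandas()
# data.endog[:5] # YES vote proportions
# data.exog.head() # COUTAX, UNEMPF, ... regressors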
| bsd-3-clause |
chanceraine/nupic.research | htmresearch/frameworks/union_pooling/activation/plotExciteDecayFunctions.py | 1 | 1930 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy
"""
This script plots different activation and decay functions and saves the
resulting figures to a pdf document "excitation_decay_functions.pdf"
"""
with PdfPages('excitation_decay_functions.pdf') as pdf:
plt.figure()
plt.subplot(2,2,1)
from union_pooling.activation.excite_functions.excite_functions_all import (
LogisticExciteFunction)
self = LogisticExciteFunction()
self.plot()
plt.xlabel('Predicted Input #')
from union_pooling.activation.decay_functions.decay_functions_all import (
ExponentialDecayFunction)
plt.subplot(2,2,2)
self = ExponentialDecayFunction(10.0)
self.plot()
pdf.savefig()
plt.close()
# from union_pooling.activation.decay_functions.logistic_decay_function import (
# LogisticDecayFunction)
# plt.figure()
# self = LogisticDecayFunction(10.0)
# self.plot()
# pdf.savefig()
# plt.close()
| agpl-3.0 |
r24mille/think_stats | chapter_four/rankit.py | 3 | 1762 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import random
import thinkstats
import myplot
import matplotlib.pyplot as pyplot
def Sample(n=6):
"""Generates a sample from a standard normal variate.
n: sample size
Returns: list of n floats
"""
t = [random.normalvariate(0.0, 1.0) for i in range(n)]
t.sort()
return t
def Samples(n=6, m=1000):
"""Generates m samples with size n each.
n: sample size
m: number of samples
Returns: list of m samples
"""
t = [Sample(n) for i in range(m)]
return t
def EstimateRankits(n=6, m=1000):
"""Estimates the expected values of sorted random samples.
n: sample size
m: number of iterations
Returns: list of n rankits
"""
t = Samples(n, m)
t = zip(*t)
means = [thinkstats.Mean(x) for x in t]
return means
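# With the defaults (n=6, m=1000) the returned means are Monte Carlo
# estimates of the rankits, i.e. the expected values of the order
# statistics of a standard normal sample of size 6.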
def MakeNormalPlot(ys, root=None, line_options={}, **options):
"""Makes a normal probability plot.
Args:
ys: sequence of values
line_options: dictionary of options for pyplot.plot
options: dictionary of options for myplot.Save
"""
# TODO: when n is small, generate a larger sample and desample
n = len(ys)
xs = [random.normalvariate(0.0, 1.0) for i in range(n)]
pyplot.clf()
pyplot.plot(sorted(xs), sorted(ys), 'b.', markersize=3, **line_options)
myplot.Save(root,
xlabel = 'Standard normal values',
legend=False,
**options)
def main():
means = EstimateRankits(84)
print(means)
if __name__ == "__main__":
main()
| gpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/stats/tests/test_var.py | 7 | 2792 | # flake8: noqa
from __future__ import print_function
import pandas.util.testing as tm
from pandas.compat import range
import nose
import unittest
raise nose.SkipTest('skipping this for now')
try:
import statsmodels.tsa.var as sm_var
import statsmodels as sm
except ImportError:
import scikits.statsmodels.tsa.var as sm_var
import scikits.statsmodels as sm
import pandas.stats.var as _pvar
reload(_pvar)
from pandas.stats.var import VAR
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
class CheckVAR(object):
def test_params(self):
tm.assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)
def test_neqs(self):
tm.assert_numpy_array_equal(self.res1.neqs, self.res2.neqs)
def test_nobs(self):
tm.assert_numpy_array_equal(self.res1.avobs, self.res2.nobs)
def test_df_eq(self):
tm.assert_numpy_array_equal(self.res1.df_eq, self.res2.df_eq)
def test_rmse(self):
results = self.res1.results
for i in range(len(results)):
tm.assert_almost_equal(results[i].mse_resid ** .5,
eval('self.res2.rmse_' + str(i + 1)),
DECIMAL_6)
def test_rsquared(self):
results = self.res1.results
for i in range(len(results)):
tm.assert_almost_equal(results[i].rsquared,
eval('self.res2.rsquared_' + str(i + 1)),
DECIMAL_3)
def test_llf(self):
results = self.res1.results
tm.assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_2)
for i in range(len(results)):
tm.assert_almost_equal(results[i].llf,
eval('self.res2.llf_' + str(i + 1)),
DECIMAL_2)
def test_aic(self):
tm.assert_almost_equal(self.res1.aic, self.res2.aic)
def test_bic(self):
tm.assert_almost_equal(self.res1.bic, self.res2.bic)
def test_hqic(self):
tm.assert_almost_equal(self.res1.hqic, self.res2.hqic)
def test_fpe(self):
tm.assert_almost_equal(self.res1.fpe, self.res2.fpe)
def test_detsig(self):
tm.assert_almost_equal(self.res1.detomega, self.res2.detsig)
def test_bse(self):
tm.assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
class Foo(object):
def __init__(self):
data = sm.datasets.macrodata.load()
data = data.data[['realinv', 'realgdp', 'realcons']].view((float, 3))
data = diff(log(data), axis=0)
self.res1 = VAR2(endog=data).fit(maxlag=2)
from results import results_var
self.res2 = results_var.MacrodataResults()
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
johannfaouzi/pyts | pyts/multivariate/image/joint_rp.py | 1 | 5299 | """Joint Recurrence Plots."""
# Author: Johann Faouzi <[email protected]>
# License: BSD-3-Clause
import numpy as np
from sklearn.base import BaseEstimator
from ...base import MultivariateTransformerMixin
from ...image import RecurrencePlot
from ..utils import check_3d_array
class JointRecurrencePlot(BaseEstimator, MultivariateTransformerMixin):
r"""Joint Recurrence Plot.
A recurrence plot is an image representing the distances between
trajectories extracted from the original time series.
A joint recurrence plot is an extension of recurrence plots for
multivariate time series: it is the Hadamard of the recurrence
plots obtained for each feature of the multivariate time series.
Parameters
----------
dimension : int or float (default = 1)
Dimension of the trajectory. If float, it represents a percentage of
the size of each time series and must be between 0 and 1.
time_delay : int or float (default = 1)
Time gap between two back-to-back points of the trajectory. If
float, it represents a percentage of the size of each time series and
must be between 0 and 1.
threshold : float, 'point', 'distance' or None or list thereof (default = None)
Threshold for the minimum distance. If None, the recurrence plots
are not binarized. If ``threshold='point'``, the threshold is computed
such as ``percentage`` percents of the points are smaller than the
threshold. If ``threshold='distance'``, the threshold is computed as
the ``percentage`` of the maximum distance.
percentage : int, float or list thereof (default = 10)
Percentage of black points if ``threshold='point'`` or percentage of
maximum distance for threshold if ``threshold='distance'``.
Ignored if ``threshold`` is a float or None. Note that the percentage
is calculated for each recurrence plot independently, which implies
that there will probably be less than `percentage` percents of black
points in the joint recurrence plot.
References
----------
.. [1] M. Romano, M. Thiel, J. Kurths and W. von Bloh, "Multivariate
Recurrence Plots". Physics Letters A (2004)
Examples
--------
>>> from pyts.datasets import load_basic_motions
>>> from pyts.multivariate.image import JointRecurrencePlot
>>> X, _, _, _ = load_basic_motions(return_X_y=True)
>>> transformer = JointRecurrencePlot()
>>> X_new = transformer.transform(X)
>>> X_new.shape
(40, 100, 100)
""" # noqa: E501
def __init__(self, dimension=1, time_delay=1, threshold=None,
percentage=10):
self.dimension = dimension
self.time_delay = time_delay
self.threshold = threshold
self.percentage = percentage
def fit(self, X=None, y=None):
"""Pass.
Parameters
----------
X
Ignored
y
Ignored
Returns
-------
self : object
"""
return self
def transform(self, X):
"""Transform each time series into a joint recurrence plot.
Parameters
----------
X : array-like, shape = (n_samples, n_features, n_timestamps)
Multivariate time series.
Returns
-------
X_new : array, shape = (n_samples, image_size, image_size)
Joint Recurrence plots. ``image_size`` is the number of
trajectories and is equal to
``n_timestamps - (dimension - 1) * time_delay``.
"""
X = check_3d_array(X)
_, n_features, _ = X.shape
thresholds_, percentages_ = self._check_params(n_features)
X_rp = [self._joint_recurrence_plot(
X[:, i, :], self.dimension, self.time_delay,
thresholds_[i], percentages_[i]) for i in range(n_features)]
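# The joint recurrence plot is the element-wise (Hadamard) product of the
# per-feature recurrence plots computed above.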
X_jrp = np.prod(X_rp, axis=0)
return X_jrp
@staticmethod
def _joint_recurrence_plot(X, dimension, time_delay,
threshold, percentage):
recurrence_plot = RecurrencePlot(
dimension, time_delay, threshold, percentage)
return recurrence_plot.transform(X)
def _check_params(self, n_features):
if isinstance(self.threshold, (tuple, list, np.ndarray)):
if len(self.threshold) != n_features:
raise ValueError(
"If 'threshold' is a list, its length must be equal to "
"n_features ({0} != {1})."
.format(len(self.threshold), n_features)
)
thresholds_ = self.threshold
else:
thresholds_ = [self.threshold for _ in range(n_features)]
if isinstance(self.percentage, (tuple, list, np.ndarray)):
if len(self.percentage) != n_features:
raise ValueError(
"If 'percentage' is a list, its length must be equal to "
"n_features ({0} != {1})."
.format(len(self.percentage), n_features)
)
percentages_ = self.percentage
else:
percentages_ = [self.percentage for _ in range(n_features)]
return thresholds_, percentages_
| bsd-3-clause |
RachitKansal/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
mojoboss/scikit-learn | sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert inputs to numpy arrays with
    # float dtypes and lets any indexable data structure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
dimkal/mne-python | examples/inverse/plot_covariance_whitening_dspm.py | 10 | 6375 | # doc:slow-example
"""
===================================================
Demonstrate impact of whitening on source estimates
===================================================
This example demonstrates the relationship between the noise covariance
estimate and the MNE / dSPM source amplitudes. It computes source estimates for
the SPM faces data and compares proper regularization with insufficient
regularization based on the methods described in [1]. The example demonstrates
that improper regularization can lead to overestimation of source amplitudes.
This example makes use of the previous, non-optimized code path that was used
before implementing the suggestions presented in [1]. Please do not copy the
patterns presented here for your own analysis; this example is purely
illustrative.
Note that this example does quite a bit of processing, so even on a
fast machine it can take a couple of minutes to complete.
References
----------
[1] Engemann D. and Gramfort A. (2015) Automated model selection in covariance
estimation and spatial whitening of MEG and EEG signals, vol. 108,
328-342, NeuroImage.
"""
# Author: Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import numpy as np
from scipy.misc import imread
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import spm_face
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.cov import compute_covariance
print(__doc__)
##############################################################################
# Get data
data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D_raw.fif'
raw = io.Raw(raw_fname % 1, preload=True) # Take first run
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, 30, method='iir', n_jobs=1)
events = mne.find_events(raw, stim_channel='UPPT001')
event_ids = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.2, 0.5
baseline = None # no baseline as high-pass is applied
reject = dict(mag=3e-12)
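# reject drops any epoch whose peak-to-peak amplitude on magnetometer-type
# channels exceeds 3e-12 T (3000 fT); no other channel types are thresholded.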
# Make source space
trans = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw-trans.fif'
src = mne.setup_source_space('spm', spacing='oct6', subjects_dir=subjects_dir,
overwrite=True, add_dist=False)
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(raw.info, trans, src, bem)
forward = mne.convert_forward_solution(forward, surf_ori=True)
# inverse parameters
conditions = 'faces', 'scrambled'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = 'dSPM'
clim = dict(kind='value', lims=[0, 2.5, 5])
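# lambda2 = 1 / SNR**2 is the usual MNE regularization heuristic for the
# inverse operator; with snr = 3.0 this gives lambda2 = 1/9 ~= 0.111.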
###############################################################################
# Estimate covariance and show resulting source estimates
method = 'empirical', 'shrunk'
best_colors = 'steelblue', 'red'
samples_epochs = 5, 15,
fig, (axes1, axes2) = plt.subplots(2, 3, figsize=(9.5, 6))
def brain_to_mpl(brain):
"""convert image to be usable with matplotlib"""
tmp_path = op.abspath(op.join(op.curdir, 'my_tmp'))
brain.save_imageset(tmp_path, views=['ven'])
im = imread(tmp_path + '_ven.png')
os.remove(tmp_path + '_ven.png')
return im
for n_train, (ax_stc_worst, ax_dynamics, ax_stc_best) in zip(samples_epochs,
(axes1, axes2)):
# estimate covs based on a subset of samples
# make sure we have the same number of conditions.
events_ = np.concatenate([events[events[:, 2] == id_][:n_train]
for id_ in [event_ids[k] for k in conditions]])
epochs_train = mne.Epochs(raw, events_, event_ids, tmin, tmax, picks=picks,
baseline=baseline, preload=True, reject=reject)
epochs_train.equalize_event_counts(event_ids, copy=False)
noise_covs = compute_covariance(epochs_train, method=method,
tmin=None, tmax=0, # baseline only
return_estimators=True) # returns list
# prepare contrast
evokeds = [epochs_train[k].average() for k in conditions]
# compute stc based on worst and best
for est, ax, kind, color in zip(noise_covs, (ax_stc_worst, ax_stc_best),
['best', 'worst'], best_colors):
# We skip empirical rank estimation that we introduced in response to
# the findings in reference [1] to use the naive code path that
# triggered the behavior described in [1]. The expected true rank is
# 274 for this dataset. Please do not do this with your data but
# rely on the default rank estimator that helps regularizing the
# covariance.
inverse_operator = make_inverse_operator(epochs_train.info, forward,
est, loose=0.2, depth=0.8,
rank=274)
stc_a, stc_b = (apply_inverse(e, inverse_operator, lambda2, "dSPM",
pick_ori=None) for e in evokeds)
stc = stc_a - stc_b
brain = stc.plot(subjects_dir=subjects_dir, hemi='both', clim=clim)
brain.set_time(175)
im = brain_to_mpl(brain)
brain.close()
ax.axis('off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.imshow(im)
ax.set_title('{0} ({1} epochs)'.format(kind, n_train * 2))
# plot spatial mean
stc_mean = stc.data.mean(0)
ax_dynamics.plot(stc.times * 1e3, stc_mean,
label='{0} ({1})'.format(est['method'], kind),
color=color)
# plot spatial std
stc_var = stc.data.std(0)
ax_dynamics.fill_between(stc.times * 1e3, stc_mean - stc_var,
stc_mean + stc_var, alpha=0.2, color=color)
# signal dynamics worst and best
ax_dynamics.set_title('{0} epochs'.format(n_train * 2))
ax_dynamics.set_xlabel('Time (ms)')
ax_dynamics.set_ylabel('Source Activation (dSPM)')
ax_dynamics.set_xlim(tmin * 1e3, tmax * 1e3)
ax_dynamics.set_ylim(-3, 3)
ax_dynamics.legend(loc='upper left', fontsize=10)
fig.subplots_adjust(hspace=0.4, left=0.03, right=0.98, wspace=0.07)
fig.canvas.draw()
fig.show()
| bsd-3-clause |
ProstoMaxim/incubator-airflow | tests/contrib/hooks/test_bigquery_hook.py | 16 | 8098 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow.contrib.hooks import bigquery_hook as hook
from oauth2client.contrib.gce import HttpAccessTokenRefreshError
bq_available = True
try:
hook.BigQueryHook().get_service()
except HttpAccessTokenRefreshError:
bq_available = False
class TestBigQueryDataframeResults(unittest.TestCase):
def setUp(self):
self.instance = hook.BigQueryHook()
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_output_is_dataframe_with_valid_query(self):
import pandas as pd
df = self.instance.get_pandas_df('select 1')
self.assertIsInstance(df, pd.DataFrame)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_invalid_query(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('from `1`')
self.assertIn('pandas_gbq.gbq.GenericGBQException: Reason: invalidQuery',
str(context.exception), "")
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
    def test_succeeds_with_explicit_legacy_query(self):
df = self.instance.get_pandas_df('select 1', dialect='legacy')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
    def test_succeeds_with_explicit_std_query(self):
df = self.instance.get_pandas_df('select * except(b) from (select 1 a, 2 b)', dialect='standard')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_incompatible_syntax(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('select * except(b) from (select 1 a, 2 b)', dialect='legacy')
self.assertIn('pandas_gbq.gbq.GenericGBQException: Reason: invalidQuery',
str(context.exception), "")
class TestBigQueryTableSplitter(unittest.TestCase):
def test_internal_need_default_project(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('dataset.table', None)
self.assertIn('INTERNAL: No default project is specified',
str(context.exception), "")
def test_split_dataset_table(self):
project, dataset, table = hook._split_tablename('dataset.table',
'project')
self.assertEqual("project", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative:dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_sql_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative.dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_colon_in_project(self):
project, dataset, table = hook._split_tablename('alt1:alt.dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_valid_double_column(self):
project, dataset, table = hook._split_tablename('alt1:alt:dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_invalid_syntax_triple_colon(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt3:dataset.table',
'project')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_triple_dot(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_column_double_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt.dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_colon_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt:dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_dot_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project', 'var_x')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
class TestBigQueryHookSourceFormat(unittest.TestCase):
def test_invalid_source_format(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load("test.test", "test_schema.json", ["test_data.json"], source_format="json")
# since we passed 'json' in, and it's not valid, make sure it's present in the error string.
self.assertIn("JSON", str(context.exception))
class TestBigQueryBaseCursor(unittest.TestCase):
def test_invalid_schema_update_options(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=["THIS IS NOT VALID"]
)
self.assertIn("THIS IS NOT VALID", str(context.exception))
def test_invalid_schema_update_and_write_disposition(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=['ALLOW_FIELD_ADDITION'],
write_disposition='WRITE_EMPTY'
)
self.assertIn("schema_update_options is only", str(context.exception))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
public-ink/public-ink | server/appengine/lib/matplotlib/backends/backend_mixed.py | 10 | 5577 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import six
from matplotlib.backends.backend_agg import RendererAgg
from matplotlib.tight_bbox import process_figure_for_rasterizing
class MixedModeRenderer(object):
"""
A helper class to implement a renderer that switches between
vector and raster drawing. An example may be a PDF writer, where
most things are drawn with PDF vector commands, but some very
complex objects, such as quad meshes, are rasterised and then
output as images.
"""
def __init__(self, figure, width, height, dpi, vector_renderer,
raster_renderer_class=None,
bbox_inches_restore=None):
"""
figure: The figure instance.
width: The width of the canvas in logical units
height: The height of the canvas in logical units
dpi: The dpi of the canvas
vector_renderer: An instance of a subclass of RendererBase
that will be used for the vector drawing.
raster_renderer_class: The renderer class to use for the
raster drawing. If not provided, this will use the Agg
backend (which is currently the only viable option anyway.)
"""
if raster_renderer_class is None:
raster_renderer_class = RendererAgg
self._raster_renderer_class = raster_renderer_class
self._width = width
self._height = height
self.dpi = dpi
self._vector_renderer = vector_renderer
self._raster_renderer = None
self._rasterizing = 0
# A reference to the figure is needed as we need to change
# the figure dpi before and after the rasterization. Although
# this looks ugly, I couldn't find a better solution. -JJL
self.figure = figure
self._figdpi = figure.get_dpi()
self._bbox_inches_restore = bbox_inches_restore
self._set_current_renderer(vector_renderer)
_methods = """
close_group draw_image draw_markers draw_path
draw_path_collection draw_quad_mesh draw_tex draw_text
finalize flipy get_canvas_width_height get_image_magnification
get_texmanager get_text_width_height_descent new_gc open_group
option_image_nocomposite points_to_pixels strip_math
start_filter stop_filter draw_gouraud_triangle
draw_gouraud_triangles option_scale_image
_text2path _get_text_path_transform height width
""".split()
def _set_current_renderer(self, renderer):
self._renderer = renderer
for method in self._methods:
if hasattr(renderer, method):
setattr(self, method, getattr(renderer, method))
renderer.start_rasterizing = self.start_rasterizing
renderer.stop_rasterizing = self.stop_rasterizing
def start_rasterizing(self):
"""
Enter "raster" mode. All subsequent drawing commands (until
stop_rasterizing is called) will be drawn with the raster
backend.
If start_rasterizing is called multiple times before
stop_rasterizing is called, this method has no effect.
"""
# change the dpi of the figure temporarily.
self.figure.set_dpi(self.dpi)
if self._bbox_inches_restore: # when tight bbox is used
r = process_figure_for_rasterizing(self.figure,
self._bbox_inches_restore)
self._bbox_inches_restore = r
if self._rasterizing == 0:
self._raster_renderer = self._raster_renderer_class(
self._width*self.dpi, self._height*self.dpi, self.dpi)
self._set_current_renderer(self._raster_renderer)
self._rasterizing += 1
def stop_rasterizing(self):
"""
Exit "raster" mode. All of the drawing that was done since
the last start_rasterizing command will be copied to the
vector backend by calling draw_image.
If stop_rasterizing is called multiple times before
start_rasterizing is called, this method has no effect.
"""
self._rasterizing -= 1
if self._rasterizing == 0:
self._set_current_renderer(self._vector_renderer)
height = self._height * self.dpi
buffer, bounds = self._raster_renderer.tostring_rgba_minimized()
l, b, w, h = bounds
if w > 0 and h > 0:
image = np.frombuffer(buffer, dtype=np.uint8)
image = image.reshape((h, w, 4))
image = image[::-1]
gc = self._renderer.new_gc()
# TODO: If the mixedmode resolution differs from the figure's
# dpi, the image must be scaled (dpi->_figdpi). Not all
# backends support this.
self._renderer.draw_image(
gc,
float(l) / self.dpi * self._figdpi,
(float(height)-b-h) / self.dpi * self._figdpi,
image)
self._raster_renderer = None
self._rasterizing = False
# restore the figure dpi.
self.figure.set_dpi(self._figdpi)
if self._bbox_inches_restore: # when tight bbox is used
r = process_figure_for_rasterizing(self.figure,
self._bbox_inches_restore,
self._figdpi)
self._bbox_inches_restore = r
| gpl-3.0 |
jorendorff/dht | plot_speed.py | 1 | 1113 | from __future__ import division
import sys
import matplotlib.pyplot as plt
import numpy
import json
def main(filename):
with open(filename) as f:
data = json.load(f)
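    # Expected JSON layout (inferred from the plotting code below):
    #   {"<testname>": {"<series name>": [[n_ops, seconds], ...], ...}, ...}
    # so each plotted point has n_ops on the x axis and n_ops/seconds on y.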
# plot the graph and save it
for testname, results in data.items():
fig = plt.figure()
fig.suptitle(testname)
axes = fig.gca()
axes.set_ylabel('speed (operations/second)')
hi = max(max(x/y for x, y in series) for series in results.values())
axes.set_ylim(bottom=0, top=hi * 1.2)
axes.set_xlabel('number of operations')
def show(data, *args, **kwargs):
xs = [x for x, y in data]
ys = [x/y for x, y in data]
axes.plot(xs, ys, *args, **kwargs)
if 'DenseTable' in results:
show(results['DenseTable'], '-o', color='#cccccc', label='dense_hash_map (open addressing)')
show(results['OpenTable'], 'b-o', label='open addressing')
show(results['CloseTable'], 'r-o', label='Close table')
axes.legend(loc='best')
fig.savefig(testname + "-speed.png", format='png')
main(sys.argv[1])
| mit |
dynaryu/rmtk | rmtk/parsers/exposure_model_converter.py | 3 | 11771 | #!/usr/bin/env python
# LICENSE
#
# Copyright (c) 2014, GEM Foundation, Anirudh Rao
#
# The rmtk is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software rmtk provided herein is released as a prototype
# implementation on behalf of scientists and engineers working within the GEM
# Foundation (Global Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (http://www.globalquakemodel.org/openquake) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the risk scientific staff of the GEM Model Facility
# ([email protected]).
#
# The nrml_converters is therefore distributed WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# The GEM Foundation, and the authors of the software, assume no liability for
# use of the software.
"""
Convert exposure model csv files to xml.
"""
import os
import csv
import math
import argparse
import pandas as pd
from lxml import etree
NAMESPACE = 'http://openquake.org/xmlns/nrml/0.4'
GML_NAMESPACE = 'http://www.opengis.net/gml'
SERIALIZE_NS_MAP = {None: NAMESPACE, 'gml': GML_NAMESPACE}
def csv_to_xml(input_csv, metadata_csv, output_xml):
"""
Converts the CSV exposure model file to the NRML format
"""
metadata = {}
data = pd.io.parsers.read_csv(input_csv)
with open(metadata_csv, 'rU') as f:
reader = csv.reader(f)
for row in reader:
metadata[row[0]] = row[1]
with open(output_xml, "w") as f:
root = etree.Element('nrml', nsmap=SERIALIZE_NS_MAP)
node_em = etree.SubElement(root, "exposureModel")
node_em.set("id", metadata['id'])
node_em.set("category", metadata['category'])
node_em.set("taxonomySource", metadata['taxonomy_source'])
node_desc = etree.SubElement(node_em, "description")
node_desc.text = metadata['description']
node_conv = etree.SubElement(node_em, "conversions")
node_cost_types = etree.SubElement(node_conv, "costTypes")
node_cost_type_s = etree.SubElement(node_cost_types, "costType")
node_cost_type_s.set("name", "structural")
node_cost_type_s.set("type", metadata['structural_cost_aggregation_type'])
node_cost_type_s.set("unit", metadata['structural_cost_currency'])
if metadata['nonstructural_cost_aggregation_type']:
node_cost_type_ns = etree.SubElement(node_cost_types, "costType")
node_cost_type_ns.set("name", "nonstructural")
node_cost_type_ns.set("type", metadata['nonstructural_cost_aggregation_type'])
node_cost_type_ns.set("unit", metadata['nonstructural_cost_currency'])
if metadata['contents_cost_aggregation_type']:
node_cost_type_c = etree.SubElement(node_cost_types, "costType")
node_cost_type_c.set("name", "contents")
node_cost_type_c.set("type", metadata['contents_cost_aggregation_type'])
node_cost_type_c.set("unit", metadata['contents_cost_currency'])
if metadata['insurance_deductible_is_absolute']:
node_deductible = etree.SubElement(node_conv, "deductible")
node_deductible.set("isAbsolute", metadata['insurance_deductible_is_absolute'].lower())
if metadata['insurance_limit_is_absolute']:
node_limit= etree.SubElement(node_conv, "insuranceLimit")
node_limit.set("isAbsolute", metadata['insurance_limit_is_absolute'].lower())
node_assets = etree.SubElement(node_em, "assets")
for row_index, row in data.iterrows():
node_asset = etree.SubElement(node_assets, "asset")
node_asset.set("id", str(row['asset_id']))
node_asset.set("number", str(row['num_buildings']))
node_asset.set("area", str(row['built_up_area']))
node_asset.set("taxonomy", str(row['taxonomy']))
node_location = etree.SubElement(node_asset, "location")
node_location.set("lon", str(row['longitude']))
node_location.set("lat", str(row['latitude']))
node_costs = etree.SubElement(node_asset, "costs")
if not math.isnan(row['structural_replacement_cost']):
node_cost_s = etree.SubElement(node_costs, "cost")
node_cost_s.set("type", 'structural')
node_cost_s.set("value", str(row['structural_replacement_cost']))
if not math.isnan(row['structural_insurance_deductible']):
node_cost_s.set("deductible", str(row['structural_insurance_deductible']))
if not math.isnan(row['structural_insurance_limit']):
node_cost_s.set("insuranceLimit", str(row['structural_insurance_limit']))
if not math.isnan(row['structural_retrofit_cost']):
node_cost_s.set("retrofitted", str(row['structural_retrofit_cost']))
if not math.isnan(row['nonstructural_replacement_cost']):
node_cost_ns = etree.SubElement(node_costs, "cost")
node_cost_ns.set("type", 'nonstructural')
node_cost_ns.set("value", str(row['nonstructural_replacement_cost']))
if not math.isnan(row['nonstructural_insurance_deductible']):
node_cost_ns.set("deductible", str(row['nonstructural_insurance_deductible']))
if not math.isnan(row['nonstructural_insurance_limit']):
node_cost_ns.set("insuranceLimit", str(row['nonstructural_insurance_limit']))
if not math.isnan(row['nonstructural_retrofit_cost']):
node_cost_ns.set("retrofitted", str(row['nonstructural_retrofit_cost']))
if not math.isnan(row['contents_replacement_cost']):
node_cost_c = etree.SubElement(node_costs, "cost")
node_cost_c.set("type", 'contents')
node_cost_c.set("value", str(row['contents_replacement_cost']))
if not math.isnan(row['contents_insurance_deductible']):
node_cost_c.set("deductible", str(row['contents_insurance_deductible']))
if not math.isnan(row['contents_insurance_limit']):
node_cost_c.set("insuranceLimit", str(row['contents_insurance_limit']))
if not math.isnan(row['contents_retrofit_cost']):
node_cost_c.set("retrofitted", str(row['contents_retrofit_cost']))
if not math.isnan(row['downtime_cost']):
node_cost_d = etree.SubElement(node_costs, "cost")
node_cost_d.set("type", 'downtime')
node_cost_d.set("value", str(row['downtime_cost']))
if not math.isnan(row['downtime_insurance_deductible']):
node_cost_d.set("deductible", str(row['downtime_insurance_deductible']))
if not math.isnan(row['downtime_insurance_limit']):
node_cost_d.set("insuranceLimit", str(row['downtime_insurance_limit']))
            if not (math.isnan(row['day_occupants']) and
                    math.isnan(row['night_occupants']) and
                    math.isnan(row['transit_occupants'])):
node_occupancies = etree.SubElement(node_asset, "occupancies")
if not math.isnan(row['day_occupants']):
node_occ_day = etree.SubElement(node_occupancies, "occupancy")
node_occ_day.set("period", 'day')
node_occ_day.set("occupants", str(row['day_occupants']))
if not math.isnan(row['night_occupants']):
node_occ_night = etree.SubElement(node_occupancies, "occupancy")
node_occ_night.set("period", 'night')
node_occ_night.set("occupants", str(row['night_occupants']))
if not math.isnan(row['transit_occupants']):
node_occ_transit = etree.SubElement(node_occupancies, "occupancy")
node_occ_transit.set("period", 'transit')
node_occ_transit.set("occupants", str(row['transit_occupants']))
f.write(etree.tostring(root, pretty_print=True, xml_declaration=True, encoding='UTF-8'))
def xml_to_csv (input_xml, output_csv):
"""
Converts the XML fragility model file to the CSV format
"""
print('This feature will be implemented in a future release.')
def set_up_arg_parser():
"""
    Set up the command line parser so the module can be run as an executable.
"""
description = ('Convert an Exposure Model from CSV to XML and '
'vice versa.\n\nTo convert from CSV to XML: '
'\npython exposure_model_converter.py '
'--input-csv-file PATH_TO_EXPOSURE_MODEL_CSV_FILE '
'--metadata-csv-file PATH_TO_EXPOSURE_METADATA_CSV_FILE '
'--output-xml-file PATH_TO_OUTPUT_XML_FILE'
'\n\nTo convert from XML to CSV type: '
'\npython exposure_model_converter.py '
'--input-xml-file PATH_TO_EXPOSURE_MODEL_XML_FILE '
'--output-csv-file PATH_TO_OUTPUT_CSV_FILE')
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter)
flags = parser.add_argument_group('flag arguments')
group_input = flags.add_argument_group('input files')
group_input_choice = group_input.add_mutually_exclusive_group(required=True)
group_input_choice.add_argument('--input-xml-file',
help='path to exposure model XML file',
default=None)
group_input_choice.add_argument('--input-csv-file',
help='path to exposure model CSV file',
default=None)
group_input.add_argument('--metadata-csv-file',
help='path to exposure metadata CSV file',
default=None,
required=True)
group_output = flags.add_argument_group('output files')
    group_output.add_argument('--output-xml-file',
                              help='path to output XML file',
                              default=None,
                              required=False)
    group_output.add_argument('--output-csv-file',
                              help='path to output CSV file',
                              default=None,
                              required=False)
return parser
if __name__ == "__main__":
parser = set_up_arg_parser()
args = parser.parse_args()
if args.input_csv_file:
if args.output_xml_file:
output_file = args.output_xml_file
else:
(filename, ext) = os.path.splitext(args.input_csv_file)
output_file = filename + '.xml'
csv_to_xml(args.input_csv_file, args.metadata_csv_file, output_file)
elif args.input_xml_file:
if args.output_csv_file:
output_file = args.output_csv_file
else:
(filename, ext) = os.path.splitext(args.input_xml_file)
output_file = filename + '.csv'
xml_to_csv(args.input_xml_file, output_file)
else:
        parser.print_usage()
| agpl-3.0 |
harisbal/pandas | pandas/tests/dtypes/test_cast.py | 6 | 17619 | # -*- coding: utf-8 -*-
"""
These test the private routines in types/cast.py
"""
import pytest
from datetime import datetime, timedelta, date
import numpy as np
import pandas as pd
from pandas import (Timedelta, Timestamp, DatetimeIndex,
DataFrame, NaT, Period, Series)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
maybe_convert_objects,
cast_scalar_to_array,
infer_dtype_from_scalar,
infer_dtype_from_array,
maybe_convert_string_to_object,
maybe_convert_scalar,
find_common_type,
construct_1d_object_array_from_listlike,
construct_1d_ndarray_preserving_na,
construct_1d_arraylike_from_scalar)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
PeriodDtype)
from pandas.core.dtypes.common import (
is_dtype_equal)
from pandas.util import testing as tm
class TestMaybeDowncast(object):
def test_downcast(self):
# test downcasting
arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_numpy_array_equal(result, arr)
arr = np.array([8., 8., 8., 8., 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
arr = np.array([8., 8., 8., 8., 9.0000000000005])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
# see gh-16875: coercing of booleans.
ser = Series([True, True, False])
result = maybe_downcast_to_dtype(ser, np.dtype(np.float64))
expected = ser
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.float64, object, np.int64])
def test_downcast_conversion_no_nan(self, dtype):
expected = np.array([1, 2])
arr = np.array([1.0, 2.0], dtype=dtype)
result = maybe_downcast_to_dtype(arr, "infer")
tm.assert_almost_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("dtype", [np.float64, object])
def test_downcast_conversion_nan(self, dtype):
expected = np.array([1.0, 2.0, np.nan], dtype=dtype)
arr = np.array([1.0, 2.0, np.nan], dtype=dtype)
result = maybe_downcast_to_dtype(arr, "infer")
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.int32, np.float64, np.float32,
np.bool_, np.int64, object])
def test_downcast_conversion_empty(self, dtype):
arr = np.array([], dtype=dtype)
result = maybe_downcast_to_dtype(arr, "int64")
tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64))
def test_datetimelikes_nan(self):
arr = np.array([1, 2, np.nan])
exp = np.array([1, 2, np.datetime64('NaT')], dtype='datetime64[ns]')
res = maybe_downcast_to_dtype(arr, 'datetime64[ns]')
tm.assert_numpy_array_equal(res, exp)
exp = np.array([1, 2, np.timedelta64('NaT')], dtype='timedelta64[ns]')
res = maybe_downcast_to_dtype(arr, 'timedelta64[ns]')
tm.assert_numpy_array_equal(res, exp)
def test_datetime_with_timezone(self):
# GH 15426
ts = Timestamp("2016-01-01 12:00:00", tz='US/Pacific')
exp = DatetimeIndex([ts, ts])
res = maybe_downcast_to_dtype(exp, exp.dtype)
tm.assert_index_equal(res, exp)
res = maybe_downcast_to_dtype(exp.asi8, exp.dtype)
tm.assert_index_equal(res, exp)
class TestInferDtype(object):
def test_infer_dtype_from_int_scalar(self, any_int_dtype):
# Test that infer_dtype_from_scalar is
# returning correct dtype for int and float.
data = np.dtype(any_int_dtype).type(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == type(data)
def test_infer_dtype_from_float_scalar(self, float_dtype):
float_dtype = np.dtype(float_dtype).type
data = float_dtype(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == float_dtype
def test_infer_dtype_from_python_scalar(self):
data = 12
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.int64
data = np.float(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.float64
@pytest.mark.parametrize("bool_val", [True, False])
def test_infer_dtype_from_boolean(self, bool_val):
dtype, val = infer_dtype_from_scalar(bool_val)
assert dtype == np.bool_
def test_infer_dtype_from_complex(self, complex_dtype):
data = np.dtype(complex_dtype).type(1)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.complex_
@pytest.mark.parametrize("data", [np.datetime64(1, "ns"), Timestamp(1),
datetime(2000, 1, 1, 0, 0)])
def test_infer_dtype_from_datetime(self, data):
dtype, val = infer_dtype_from_scalar(data)
assert dtype == "M8[ns]"
@pytest.mark.parametrize("data", [np.timedelta64(1, "ns"), Timedelta(1),
timedelta(1)])
def test_infer_dtype_from_timedelta(self, data):
dtype, val = infer_dtype_from_scalar(data)
assert dtype == "m8[ns]"
@pytest.mark.parametrize("freq", ["M", "D"])
def test_infer_dtype_from_period(self, freq):
p = Period("2011-01-01", freq=freq)
dtype, val = infer_dtype_from_scalar(p, pandas_dtype=True)
assert dtype == "period[{0}]".format(freq)
assert val == p.ordinal
dtype, val = infer_dtype_from_scalar(p)
assert dtype == np.object_
assert val == p
@pytest.mark.parametrize("data", [date(2000, 1, 1), "foo",
Timestamp(1, tz="US/Eastern")])
def test_infer_dtype_misc(self, data):
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.object_
@pytest.mark.parametrize('tz', ['UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_infer_from_scalar_tz(self, tz):
dt = Timestamp(1, tz=tz)
dtype, val = infer_dtype_from_scalar(dt, pandas_dtype=True)
assert dtype == 'datetime64[ns, {0}]'.format(tz)
assert val == dt.value
dtype, val = infer_dtype_from_scalar(dt)
assert dtype == np.object_
assert val == dt
def test_infer_dtype_from_scalar_errors(self):
with pytest.raises(ValueError):
infer_dtype_from_scalar(np.array([1]))
@pytest.mark.parametrize(
"arr, expected, pandas_dtype",
[('foo', np.object_, False),
(b'foo', np.object_, False),
(1, np.int_, False),
(1.5, np.float_, False),
([1], np.int_, False),
(np.array([1], dtype=np.int64), np.int64, False),
([np.nan, 1, ''], np.object_, False),
(np.array([[1.0, 2.0]]), np.float_, False),
(pd.Categorical(list('aabc')), np.object_, False),
(pd.Categorical([1, 2, 3]), np.int64, False),
(pd.Categorical(list('aabc')), 'category', True),
(pd.Categorical([1, 2, 3]), 'category', True),
(Timestamp('20160101'), np.object_, False),
(np.datetime64('2016-01-01'), np.dtype('=M8[D]'), False),
(pd.date_range('20160101', periods=3),
np.dtype('=M8[ns]'), False),
(pd.date_range('20160101', periods=3, tz='US/Eastern'),
'datetime64[ns, US/Eastern]', True),
(pd.Series([1., 2, 3]), np.float64, False),
(pd.Series(list('abc')), np.object_, False),
(pd.Series(pd.date_range('20160101', periods=3, tz='US/Eastern')),
'datetime64[ns, US/Eastern]', True)])
def test_infer_dtype_from_array(self, arr, expected, pandas_dtype):
dtype, _ = infer_dtype_from_array(arr, pandas_dtype=pandas_dtype)
assert is_dtype_equal(dtype, expected)
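    # Note: the pandas_dtype flag above controls whether pandas extension
    # dtypes ('category', 'datetime64[ns, <tz>]', ...) are reported as such;
    # with pandas_dtype=False the inference falls back to a plain numpy dtype
    # (e.g. object for string categoricals, the categories' numeric dtype
    # otherwise).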
def test_cast_scalar_to_array(self):
arr = cast_scalar_to_array((3, 2), 1, dtype=np.int64)
exp = np.ones((3, 2), dtype=np.int64)
tm.assert_numpy_array_equal(arr, exp)
arr = cast_scalar_to_array((3, 2), 1.1)
exp = np.empty((3, 2), dtype=np.float64)
exp.fill(1.1)
tm.assert_numpy_array_equal(arr, exp)
arr = cast_scalar_to_array((2, 3), Timestamp('2011-01-01'))
exp = np.empty((2, 3), dtype='datetime64[ns]')
exp.fill(np.datetime64('2011-01-01'))
tm.assert_numpy_array_equal(arr, exp)
# pandas dtype is stored as object dtype
obj = Timestamp('2011-01-01', tz='US/Eastern')
arr = cast_scalar_to_array((2, 3), obj)
exp = np.empty((2, 3), dtype=np.object)
exp.fill(obj)
tm.assert_numpy_array_equal(arr, exp)
obj = Period('2011-01-01', freq='D')
arr = cast_scalar_to_array((2, 3), obj)
exp = np.empty((2, 3), dtype=np.object)
exp.fill(obj)
tm.assert_numpy_array_equal(arr, exp)
class TestMaybe(object):
def test_maybe_convert_string_to_array(self):
result = maybe_convert_string_to_object('x')
tm.assert_numpy_array_equal(result, np.array(['x'], dtype=object))
assert result.dtype == object
result = maybe_convert_string_to_object(1)
assert result == 1
arr = np.array(['x', 'y'], dtype=str)
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
assert result.dtype == object
# unicode
arr = np.array(['x', 'y']).astype('U')
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
assert result.dtype == object
# object
arr = np.array(['x', 2], dtype=object)
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 2], dtype=object))
assert result.dtype == object
def test_maybe_convert_scalar(self):
# pass thru
result = maybe_convert_scalar('x')
assert result == 'x'
result = maybe_convert_scalar(np.array([1]))
assert result == np.array([1])
# leave scalar dtype
result = maybe_convert_scalar(np.int64(1))
assert result == np.int64(1)
result = maybe_convert_scalar(np.int32(1))
assert result == np.int32(1)
result = maybe_convert_scalar(np.float32(1))
assert result == np.float32(1)
result = maybe_convert_scalar(np.int64(1))
assert result == np.float64(1)
# coerce
result = maybe_convert_scalar(1)
assert result == np.int64(1)
result = maybe_convert_scalar(1.0)
assert result == np.float64(1)
result = maybe_convert_scalar(Timestamp('20130101'))
assert result == Timestamp('20130101').value
result = maybe_convert_scalar(datetime(2013, 1, 1))
assert result == Timestamp('20130101').value
result = maybe_convert_scalar(Timedelta('1 day 1 min'))
assert result == Timedelta('1 day 1 min').value
def test_maybe_infer_to_datetimelike(self):
# GH16362
# pandas=0.20.1 raises IndexError: tuple index out of range
result = DataFrame(np.array([[NaT, 'a', 'b', 0],
[NaT, 'b', 'c', 1]]))
assert result.size == 8
# this construction was fine
result = DataFrame(np.array([[NaT, 'a', 0],
[NaT, 'b', 1]]))
assert result.size == 6
# GH19671
result = Series(['M1701', Timestamp('20130101')])
assert result.dtype.kind == 'O'
class TestConvert(object):
def test_maybe_convert_objects_copy(self):
values = np.array([1, 2])
out = maybe_convert_objects(values, copy=False)
assert values is out
out = maybe_convert_objects(values, copy=True)
assert values is not out
values = np.array(['apply', 'banana'])
out = maybe_convert_objects(values, copy=False)
assert values is out
out = maybe_convert_objects(values, copy=True)
assert values is not out
class TestCommonTypes(object):
@pytest.mark.parametrize("source_dtypes,expected_common_dtype", [
((np.int64,), np.int64),
((np.uint64,), np.uint64),
((np.float32,), np.float32),
((np.object,), np.object),
# into ints
((np.int16, np.int64), np.int64),
((np.int32, np.uint32), np.int64),
((np.uint16, np.uint64), np.uint64),
# into floats
((np.float16, np.float32), np.float32),
((np.float16, np.int16), np.float32),
((np.float32, np.int16), np.float32),
((np.uint64, np.int64), np.float64),
((np.int16, np.float64), np.float64),
((np.float16, np.int64), np.float64),
# into others
((np.complex128, np.int32), np.complex128),
((np.object, np.float32), np.object),
((np.object, np.int16), np.object),
# bool with int
((np.dtype('bool'), np.int64), np.object),
((np.dtype('bool'), np.int32), np.object),
((np.dtype('bool'), np.int16), np.object),
((np.dtype('bool'), np.int8), np.object),
((np.dtype('bool'), np.uint64), np.object),
((np.dtype('bool'), np.uint32), np.object),
((np.dtype('bool'), np.uint16), np.object),
((np.dtype('bool'), np.uint8), np.object),
# bool with float
((np.dtype('bool'), np.float64), np.object),
((np.dtype('bool'), np.float32), np.object),
((np.dtype('datetime64[ns]'), np.dtype('datetime64[ns]')),
np.dtype('datetime64[ns]')),
((np.dtype('timedelta64[ns]'), np.dtype('timedelta64[ns]')),
np.dtype('timedelta64[ns]')),
((np.dtype('datetime64[ns]'), np.dtype('datetime64[ms]')),
np.dtype('datetime64[ns]')),
((np.dtype('timedelta64[ms]'), np.dtype('timedelta64[ns]')),
np.dtype('timedelta64[ns]')),
((np.dtype('datetime64[ns]'), np.dtype('timedelta64[ns]')),
np.object),
((np.dtype('datetime64[ns]'), np.int64), np.object)
])
def test_numpy_dtypes(self, source_dtypes, expected_common_dtype):
assert find_common_type(source_dtypes) == expected_common_dtype
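    # Note: the table above mostly mirrors numpy promotion rules, with the
    # explicitly exercised exceptions that bool mixed with any int or float
    # promotes to object, and datetime64 mixed with timedelta64 or with
    # integers promotes to object instead of a numeric upcast.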
def test_raises_empty_input(self):
with pytest.raises(ValueError):
find_common_type([])
def test_categorical_dtype(self):
dtype = CategoricalDtype()
assert find_common_type([dtype]) == 'category'
assert find_common_type([dtype, dtype]) == 'category'
assert find_common_type([np.object, dtype]) == np.object
def test_datetimetz_dtype(self):
dtype = DatetimeTZDtype(unit='ns', tz='US/Eastern')
assert find_common_type([dtype, dtype]) == 'datetime64[ns, US/Eastern]'
for dtype2 in [DatetimeTZDtype(unit='ns', tz='Asia/Tokyo'),
np.dtype('datetime64[ns]'), np.object, np.int64]:
assert find_common_type([dtype, dtype2]) == np.object
assert find_common_type([dtype2, dtype]) == np.object
def test_period_dtype(self):
dtype = PeriodDtype(freq='D')
assert find_common_type([dtype, dtype]) == 'period[D]'
for dtype2 in [DatetimeTZDtype(unit='ns', tz='Asia/Tokyo'),
PeriodDtype(freq='2D'), PeriodDtype(freq='H'),
np.dtype('datetime64[ns]'), np.object, np.int64]:
assert find_common_type([dtype, dtype2]) == np.object
assert find_common_type([dtype2, dtype]) == np.object
@pytest.mark.parametrize('datum1', [1, 2., "3", (4, 5), [6, 7], None])
@pytest.mark.parametrize('datum2', [8, 9., "10", (11, 12), [13, 14], None])
def test_cast_1d_array(self, datum1, datum2):
data = [datum1, datum2]
result = construct_1d_object_array_from_listlike(data)
# Direct comparison fails: https://github.com/numpy/numpy/issues/10218
assert result.dtype == 'object'
assert list(result) == data
@pytest.mark.parametrize('val', [1, 2., None])
def test_cast_1d_array_invalid_scalar(self, val):
pytest.raises(TypeError, construct_1d_object_array_from_listlike, val)
def test_cast_1d_arraylike_from_scalar_categorical(self):
# GH 19565 - Categorical result from scalar did not maintain categories
# and ordering of the passed dtype
cats = ['a', 'b', 'c']
cat_type = CategoricalDtype(categories=cats, ordered=False)
expected = pd.Categorical(['a', 'a'], categories=cats)
result = construct_1d_arraylike_from_scalar('a', len(expected),
cat_type)
tm.assert_categorical_equal(result, expected,
check_category_order=True,
check_dtype=True)
@pytest.mark.parametrize('values, dtype, expected', [
([1, 2, 3], None, np.array([1, 2, 3])),
(np.array([1, 2, 3]), None, np.array([1, 2, 3])),
(['1', '2', None], None, np.array(['1', '2', None])),
(['1', '2', None], np.dtype('str'), np.array(['1', '2', None])),
([1, 2, None], np.dtype('str'), np.array(['1', '2', None])),
])
def test_construct_1d_ndarray_preserving_na(values, dtype, expected):
result = construct_1d_ndarray_preserving_na(values, dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
bundgus/python-playground | matplotlib-playground/examples/user_interfaces/embedding_in_wx4.py | 2 | 3375 | #!/usr/bin/env python
"""
An example of how to use wx or wxagg in an application with a custom
toolbar
"""
# matplotlib requires wxPython 2.8+
# set the wxPython version in lib\site-packages\wx.pth file
# or if you have wxversion installed un-comment the lines below
#import wxversion
#wxversion.ensureMinimal('2.8')
from numpy import arange, sin, pi
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg
from matplotlib.backends.backend_wx import _load_bitmap
from matplotlib.figure import Figure
from numpy.random import rand
import wx
class MyNavigationToolbar(NavigationToolbar2WxAgg):
"""
Extend the default wx toolbar with your own event handlers
"""
ON_CUSTOM = wx.NewId()
def __init__(self, canvas, cankill):
NavigationToolbar2WxAgg.__init__(self, canvas)
# for simplicity I'm going to reuse a bitmap from wx, you'll
# probably want to add your own.
if 'phoenix' in wx.PlatformInfo:
self.AddTool(self.ON_CUSTOM, 'Click me',
_load_bitmap('stock_left.xpm'),
                         'Activate custom control')
self.Bind(wx.EVT_TOOL, self._on_custom, id=self.ON_CUSTOM)
else:
self.AddSimpleTool(self.ON_CUSTOM, _load_bitmap('stock_left.xpm'),
                               'Click me', 'Activate custom control')
self.Bind(wx.EVT_TOOL, self._on_custom, id=self.ON_CUSTOM)
def _on_custom(self, evt):
# add some text to the axes in a random location in axes (0,1)
# coords) with a random color
# get the axes
ax = self.canvas.figure.axes[0]
        # generate a random location and color
x, y = tuple(rand(2))
rgb = tuple(rand(3))
# add the text and draw
ax.text(x, y, 'You clicked me',
transform=ax.transAxes,
color=rgb)
self.canvas.draw()
evt.Skip()
class CanvasFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1,
'CanvasFrame', size=(550, 350))
self.figure = Figure(figsize=(5, 4), dpi=100)
self.axes = self.figure.add_subplot(111)
t = arange(0.0, 3.0, 0.01)
s = sin(2 * pi * t)
self.axes.plot(t, s)
self.canvas = FigureCanvas(self, -1, self.figure)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
# Capture the paint message
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.toolbar = MyNavigationToolbar(self.canvas, True)
self.toolbar.Realize()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
# update the axes menu on the toolbar
self.toolbar.update()
self.SetSizer(self.sizer)
self.Fit()
def OnPaint(self, event):
self.canvas.draw()
event.Skip()
class App(wx.App):
def OnInit(self):
'Create the main window and insert the custom frame'
frame = CanvasFrame()
frame.Show(True)
return True
app = App(0)
app.MainLoop()
| mit |
vamsirajendra/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/axis.py | 69 | 54453 | """
Classes for the ticks and x and y axis
"""
from __future__ import division
from matplotlib import rcParams
import matplotlib.artist as artist
import matplotlib.cbook as cbook
import matplotlib.font_manager as font_manager
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.scale as mscale
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.units as munits
class Tick(artist.Artist):
"""
Abstract base class for the axis ticks, grid lines and labels
1 refers to the bottom of the plot for xticks and the left for yticks
2 refers to the top of the plot for xticks and the right for yticks
Publicly accessible attributes:
:attr:`tick1line`
a Line2D instance
:attr:`tick2line`
a Line2D instance
:attr:`gridline`
a Line2D instance
:attr:`label1`
a Text instance
:attr:`label2`
a Text instance
:attr:`gridOn`
a boolean which determines whether to draw the tickline
:attr:`tick1On`
a boolean which determines whether to draw the 1st tickline
:attr:`tick2On`
a boolean which determines whether to draw the 2nd tickline
:attr:`label1On`
a boolean which determines whether to draw tick label
:attr:`label2On`
a boolean which determines whether to draw tick label
"""
def __init__(self, axes, loc, label,
size = None, # points
gridOn = None, # defaults to axes.grid
tick1On = True,
tick2On = True,
label1On = True,
label2On = False,
major = True,
):
"""
bbox is the Bound2D bounding box in display coords of the Axes
loc is the tick location in data coords
size is the tick size in relative, axes coords
"""
artist.Artist.__init__(self)
if gridOn is None: gridOn = rcParams['axes.grid']
self.set_figure(axes.figure)
self.axes = axes
name = self.__name__.lower()
if size is None:
if major:
size = rcParams['%s.major.size'%name]
pad = rcParams['%s.major.pad'%name]
else:
size = rcParams['%s.minor.size'%name]
pad = rcParams['%s.minor.pad'%name]
self._tickdir = rcParams['%s.direction'%name]
if self._tickdir == 'in':
self._xtickmarkers = (mlines.TICKUP, mlines.TICKDOWN)
self._ytickmarkers = (mlines.TICKRIGHT, mlines.TICKLEFT)
self._pad = pad
else:
self._xtickmarkers = (mlines.TICKDOWN, mlines.TICKUP)
self._ytickmarkers = (mlines.TICKLEFT, mlines.TICKRIGHT)
self._pad = pad + size
self._loc = loc
self._size = size
self.tick1line = self._get_tick1line()
self.tick2line = self._get_tick2line()
self.gridline = self._get_gridline()
self.label1 = self._get_text1()
self.label = self.label1 # legacy name
self.label2 = self._get_text2()
self.gridOn = gridOn
self.tick1On = tick1On
self.tick2On = tick2On
self.label1On = label1On
self.label2On = label2On
self.update_position(loc)
def get_children(self):
children = [self.tick1line, self.tick2line, self.gridline, self.label1, self.label2]
return children
def set_clip_path(self, clippath, transform=None):
artist.Artist.set_clip_path(self, clippath, transform)
#self.tick1line.set_clip_path(clippath, transform)
#self.tick2line.set_clip_path(clippath, transform)
self.gridline.set_clip_path(clippath, transform)
set_clip_path.__doc__ = artist.Artist.set_clip_path.__doc__
def get_pad_pixels(self):
return self.figure.dpi * self._pad / 72.0
def contains(self, mouseevent):
"""
Test whether the mouse event occured in the Tick marks.
This function always returns false. It is more useful to test if the
axis as a whole contains the mouse rather than the set of tick marks.
"""
if callable(self._contains): return self._contains(self,mouseevent)
return False,{}
def set_pad(self, val):
"""
Set the tick label pad in points
ACCEPTS: float
"""
self._pad = val
def get_pad(self):
'Get the value of the tick label pad in points'
return self._pad
def _get_text1(self):
'Get the default Text 1 instance'
pass
def _get_text2(self):
'Get the default Text 2 instance'
pass
def _get_tick1line(self):
'Get the default line2D instance for tick1'
pass
def _get_tick2line(self):
'Get the default line2D instance for tick2'
pass
def _get_gridline(self):
'Get the default grid Line2d instance for this tick'
pass
def get_loc(self):
'Return the tick location (data coords) as a scalar'
return self._loc
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__name__)
midPoint = mtransforms.interval_contains(self.get_view_interval(), self.get_loc())
if midPoint:
if self.gridOn:
self.gridline.draw(renderer)
if self.tick1On:
self.tick1line.draw(renderer)
if self.tick2On:
self.tick2line.draw(renderer)
if self.label1On:
self.label1.draw(renderer)
if self.label2On:
self.label2.draw(renderer)
renderer.close_group(self.__name__)
def set_label1(self, s):
"""
Set the text of ticklabel
ACCEPTS: str
"""
self.label1.set_text(s)
set_label = set_label1
def set_label2(self, s):
"""
Set the text of ticklabel2
ACCEPTS: str
"""
self.label2.set_text(s)
def _set_artist_props(self, a):
a.set_figure(self.figure)
#if isinstance(a, mlines.Line2D): a.set_clip_box(self.axes.bbox)
def get_view_interval(self):
'return the view Interval instance for the axis this tick is ticking'
raise NotImplementedError('Derived must override')
def set_view_interval(self, vmin, vmax, ignore=False):
raise NotImplementedError('Derived must override')
class XTick(Tick):
"""
Contains all the Artists needed to make an x tick - the tick line,
the label text and the grid line
"""
__name__ = 'xtick'
def _get_text1(self):
'Get the default Text instance'
# the y loc is 3 points below the min of y axis
# get the affine as an a,b,c,d,tx,ty list
# x in data coords, y in axes coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_xaxis_text1_transform(self._pad)
size = rcParams['xtick.labelsize']
t = mtext.Text(
x=0, y=0,
fontproperties=font_manager.FontProperties(size=size),
color=rcParams['xtick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_text2(self):
'Get the default Text 2 instance'
# x in data coords, y in axes coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_xaxis_text2_transform(self._pad)
t = mtext.Text(
x=0, y=1,
fontproperties=font_manager.FontProperties(size=rcParams['xtick.labelsize']),
color=rcParams['xtick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_tick1line(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0,), ydata=(0,),
color='k',
linestyle = 'None',
marker = self._xtickmarkers[0],
markersize=self._size,
)
l.set_transform(self.axes.get_xaxis_transform())
self._set_artist_props(l)
return l
def _get_tick2line(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D( xdata=(0,), ydata=(1,),
color='k',
linestyle = 'None',
marker = self._xtickmarkers[1],
markersize=self._size,
)
l.set_transform(self.axes.get_xaxis_transform())
self._set_artist_props(l)
return l
def _get_gridline(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0.0, 0.0), ydata=(0, 1.0),
color=rcParams['grid.color'],
linestyle=rcParams['grid.linestyle'],
linewidth=rcParams['grid.linewidth'],
)
l.set_transform(self.axes.get_xaxis_transform())
self._set_artist_props(l)
return l
def update_position(self, loc):
'Set the location of tick in data coords with scalar *loc*'
x = loc
nonlinear = (hasattr(self.axes, 'yaxis') and
self.axes.yaxis.get_scale() != 'linear' or
hasattr(self.axes, 'xaxis') and
self.axes.xaxis.get_scale() != 'linear')
if self.tick1On:
self.tick1line.set_xdata((x,))
if self.tick2On:
self.tick2line.set_xdata((x,))
if self.gridOn:
self.gridline.set_xdata((x,))
if self.label1On:
self.label1.set_x(x)
if self.label2On:
self.label2.set_x(x)
if nonlinear:
self.tick1line._invalid = True
self.tick2line._invalid = True
self.gridline._invalid = True
self._loc = loc
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
def set_view_interval(self, vmin, vmax, ignore = False):
if ignore:
self.axes.viewLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposx
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervalx
class YTick(Tick):
"""
Contains all the Artists needed to make a Y tick - the tick line,
the label text and the grid line
"""
__name__ = 'ytick'
# how far from the y axis line the right of the ticklabel are
def _get_text1(self):
'Get the default Text instance'
# x in axes coords, y in data coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_yaxis_text1_transform(self._pad)
t = mtext.Text(
x=0, y=0,
fontproperties=font_manager.FontProperties(size=rcParams['ytick.labelsize']),
color=rcParams['ytick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
#t.set_transform( self.axes.transData )
self._set_artist_props(t)
return t
def _get_text2(self):
'Get the default Text instance'
# x in axes coords, y in data coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_yaxis_text2_transform(self._pad)
t = mtext.Text(
x=1, y=0,
fontproperties=font_manager.FontProperties(size=rcParams['ytick.labelsize']),
color=rcParams['ytick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_tick1line(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D( (0,), (0,), color='k',
marker = self._ytickmarkers[0],
linestyle = 'None',
markersize=self._size,
)
l.set_transform(self.axes.get_yaxis_transform())
self._set_artist_props(l)
return l
def _get_tick2line(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D( (1,), (0,), color='k',
marker = self._ytickmarkers[1],
linestyle = 'None',
markersize=self._size,
)
l.set_transform(self.axes.get_yaxis_transform())
self._set_artist_props(l)
return l
def _get_gridline(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D( xdata=(0,1), ydata=(0, 0),
color=rcParams['grid.color'],
linestyle=rcParams['grid.linestyle'],
linewidth=rcParams['grid.linewidth'],
)
l.set_transform(self.axes.get_yaxis_transform())
self._set_artist_props(l)
return l
def update_position(self, loc):
'Set the location of tick in data coords with scalar loc'
y = loc
nonlinear = ((hasattr(self.axes, 'yaxis') and
self.axes.yaxis.get_scale() != 'linear') or
(hasattr(self.axes, 'xaxis') and
self.axes.xaxis.get_scale() != 'linear'))
if self.tick1On:
self.tick1line.set_ydata((y,))
if self.tick2On:
self.tick2line.set_ydata((y,))
if self.gridOn:
self.gridline.set_ydata((y, ))
if self.label1On:
self.label1.set_y( y )
if self.label2On:
self.label2.set_y( y )
if nonlinear:
self.tick1line._invalid = True
self.tick2line._invalid = True
self.gridline._invalid = True
self._loc = loc
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervaly
def set_view_interval(self, vmin, vmax, ignore = False):
if ignore:
self.axes.viewLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposy
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervaly
class Ticker:
locator = None
formatter = None
class Axis(artist.Artist):
"""
Public attributes
* :attr:`transData` - transform data coords to display coords
* :attr:`transAxis` - transform axis coords to display coords
"""
LABELPAD = 5
OFFSETTEXTPAD = 3
def __str__(self):
return self.__class__.__name__ \
+ "(%f,%f)"%tuple(self.axes.transAxes.transform_point((0,0)))
def __init__(self, axes, pickradius=15):
"""
Init the axis with the parent Axes instance
"""
artist.Artist.__init__(self)
self.set_figure(axes.figure)
self.axes = axes
self.major = Ticker()
self.minor = Ticker()
self.callbacks = cbook.CallbackRegistry(('units', 'units finalize'))
#class dummy:
# locator = None
# formatter = None
#self.major = dummy()
#self.minor = dummy()
self._autolabelpos = True
self.label = self._get_label()
self.offsetText = self._get_offset_text()
self.majorTicks = []
self.minorTicks = []
self.pickradius = pickradius
self.cla()
self.set_scale('linear')
def set_label_coords(self, x, y, transform=None):
"""
Set the coordinates of the label. By default, the x
coordinate of the y label is determined by the tick label
bounding boxes, but this can lead to poor alignment of
multiple ylabels if there are multiple axes. Ditto for the y
coordinate of the x label.
You can also specify the coordinate system of the label with
the transform. If None, the default coordinate system is the
axes coordinate system, where (0, 0) is (left, bottom), (0.5, 0.5)
is the middle, etc.
"""
self._autolabelpos = False
if transform is None:
transform = self.axes.transAxes
self.label.set_transform(transform)
self.label.set_position((x, y))
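# Illustrative usage sketch (assumes a matplotlib Axes instance ``ax``; not a
# definitive recipe): pinning the label bypasses the tick-label-dependent
# default placement described above, e.g.
#     ax.yaxis.set_label_coords(-0.1, 0.5)
# keeps the ylabels of several stacked subplots in one column, while an
# explicit transform such as ``transform=fig.transFigure`` places the label
# in figure coordinates instead of axes coordinates.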
def get_transform(self):
return self._scale.get_transform()
def get_scale(self):
return self._scale.name
def set_scale(self, value, **kwargs):
self._scale = mscale.scale_factory(value, self, **kwargs)
self._scale.set_default_locators_and_formatters(self)
def limit_range_for_scale(self, vmin, vmax):
return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())
def get_children(self):
children = [self.label]
majorticks = self.get_major_ticks()
minorticks = self.get_minor_ticks()
children.extend(majorticks)
children.extend(minorticks)
return children
def cla(self):
'clear the current axis'
self.set_major_locator(mticker.AutoLocator())
self.set_major_formatter(mticker.ScalarFormatter())
self.set_minor_locator(mticker.NullLocator())
self.set_minor_formatter(mticker.NullFormatter())
# Clear the callback registry for this axis, or it may "leak"
self.callbacks = cbook.CallbackRegistry(('units', 'units finalize'))
# whether the grids are on
self._gridOnMajor = rcParams['axes.grid']
self._gridOnMinor = False
self.label.set_text('')
self._set_artist_props(self.label)
# build a few default ticks; grow as necessary later; only
# define 1 so properties set on ticks will be copied as they
# grow
cbook.popall(self.majorTicks)
cbook.popall(self.minorTicks)
self.majorTicks.extend([self._get_tick(major=True)])
self.minorTicks.extend([self._get_tick(major=False)])
self._lastNumMajorTicks = 1
self._lastNumMinorTicks = 1
self.converter = None
self.units = None
self.set_units(None)
def set_clip_path(self, clippath, transform=None):
artist.Artist.set_clip_path(self, clippath, transform)
majorticks = self.get_major_ticks()
minorticks = self.get_minor_ticks()
for child in self.majorTicks + self.minorTicks:
child.set_clip_path(clippath, transform)
def get_view_interval(self):
'return the Interval instance for this axis view limits'
raise NotImplementedError('Derived must override')
def set_view_interval(self, vmin, vmax, ignore=False):
raise NotImplementedError('Derived must override')
def get_data_interval(self):
'return the Interval instance for this axis data limits'
raise NotImplementedError('Derived must override')
def set_data_interval(self):
'Set the axis data limits'
raise NotImplementedError('Derived must override')
def _set_artist_props(self, a):
if a is None: return
a.set_figure(self.figure)
def iter_ticks(self):
"""
Iterate through all of the major and minor ticks.
"""
majorLocs = self.major.locator()
majorTicks = self.get_major_ticks(len(majorLocs))
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]
minorLocs = self.minor.locator()
minorTicks = self.get_minor_ticks(len(minorLocs))
self.minor.formatter.set_locs(minorLocs)
minorLabels = [self.minor.formatter(val, i) for i, val in enumerate(minorLocs)]
major_minor = [
(majorTicks, majorLocs, majorLabels),
(minorTicks, minorLocs, minorLabels)]
for group in major_minor:
for tick in zip(*group):
yield tick
def get_ticklabel_extents(self, renderer):
"""
Get the extents of the tick labels on either side
of the axes.
"""
ticklabelBoxes = []
ticklabelBoxes2 = []
interval = self.get_view_interval()
for tick, loc, label in self.iter_ticks():
if tick is None: continue
if not mtransforms.interval_contains(interval, loc): continue
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
if tick.label1On and tick.label1.get_visible():
extent = tick.label1.get_window_extent(renderer)
ticklabelBoxes.append(extent)
if tick.label2On and tick.label2.get_visible():
extent = tick.label2.get_window_extent(renderer)
ticklabelBoxes2.append(extent)
if len(ticklabelBoxes):
bbox = mtransforms.Bbox.union(ticklabelBoxes)
else:
bbox = mtransforms.Bbox.from_extents(0, 0, 0, 0)
if len(ticklabelBoxes2):
bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)
else:
bbox2 = mtransforms.Bbox.from_extents(0, 0, 0, 0)
return bbox, bbox2
def draw(self, renderer, *args, **kwargs):
'Draw the axis lines, grid lines, tick lines and labels'
ticklabelBoxes = []
ticklabelBoxes2 = []
if not self.get_visible(): return
renderer.open_group(__name__)
interval = self.get_view_interval()
for tick, loc, label in self.iter_ticks():
if tick is None: continue
if not mtransforms.interval_contains(interval, loc): continue
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
tick.draw(renderer)
if tick.label1On and tick.label1.get_visible():
extent = tick.label1.get_window_extent(renderer)
ticklabelBoxes.append(extent)
if tick.label2On and tick.label2.get_visible():
extent = tick.label2.get_window_extent(renderer)
ticklabelBoxes2.append(extent)
# scale up the axis label box to also find the neighbors, not
# just the tick labels that actually overlap. Note we need a
# *copy* of the axis label box because we don't want to scale
# the actual bbox.
self._update_label_position(ticklabelBoxes, ticklabelBoxes2)
self.label.draw(renderer)
self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
self.offsetText.set_text( self.major.formatter.get_offset() )
self.offsetText.draw(renderer)
if 0: # draw the bounding boxes around the text for debug
for tick in self.majorTicks:
label = tick.label1
mpatches.bbox_artist(label, renderer)
mpatches.bbox_artist(self.label, renderer)
renderer.close_group(__name__)
def _get_label(self):
raise NotImplementedError('Derived must override')
def _get_offset_text(self):
raise NotImplementedError('Derived must override')
def get_gridlines(self):
'Return the grid lines as a list of Line2D instance'
ticks = self.get_major_ticks()
return cbook.silent_list('Line2D gridline', [tick.gridline for tick in ticks])
def get_label(self):
'Return the axis label as a Text instance'
return self.label
def get_offset_text(self):
'Return the axis offsetText as a Text instance'
return self.offsetText
def get_pickradius(self):
'Return the depth of the axis used by the picker'
return self.pickradius
def get_majorticklabels(self):
'Return a list of Text instances for the major ticklabels'
ticks = self.get_major_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1On]
labels2 = [tick.label2 for tick in ticks if tick.label2On]
return cbook.silent_list('Text major ticklabel', labels1+labels2)
def get_minorticklabels(self):
'Return a list of Text instances for the minor ticklabels'
ticks = self.get_minor_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1On]
labels2 = [tick.label2 for tick in ticks if tick.label2On]
return cbook.silent_list('Text minor ticklabel', labels1+labels2)
def get_ticklabels(self, minor=False):
'Return a list of Text instances for ticklabels'
if minor:
return self.get_minorticklabels()
return self.get_majorticklabels()
def get_majorticklines(self):
'Return the major tick lines as a list of Line2D instances'
lines = []
ticks = self.get_major_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_minorticklines(self):
'Return the minor tick lines as a list of Line2D instances'
lines = []
ticks = self.get_minor_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_ticklines(self, minor=False):
'Return the tick lines as a list of Line2D instances'
if minor:
return self.get_minorticklines()
return self.get_majorticklines()
def get_majorticklocs(self):
"Get the major tick locations in data coordinates as a numpy array"
return self.major.locator()
def get_minorticklocs(self):
"Get the minor tick locations in data coordinates as a numpy array"
return self.minor.locator()
def get_ticklocs(self, minor=False):
"Get the tick locations in data coordinates as a numpy array"
if minor:
return self.minor.locator()
return self.major.locator()
def _get_tick(self, major):
'return the default tick instance'
raise NotImplementedError('Derived must override')
def _copy_tick_props(self, src, dest):
'Copy the props from src tick to dest tick'
if src is None or dest is None: return
dest.label1.update_from(src.label1)
dest.label2.update_from(src.label2)
dest.tick1line.update_from(src.tick1line)
dest.tick2line.update_from(src.tick2line)
dest.gridline.update_from(src.gridline)
dest.tick1On = src.tick1On
dest.tick2On = src.tick2On
dest.label1On = src.label1On
dest.label2On = src.label2On
def get_major_locator(self):
'Get the locator of the major ticker'
return self.major.locator
def get_minor_locator(self):
'Get the locator of the minor ticker'
return self.minor.locator
def get_major_formatter(self):
'Get the formatter of the major ticker'
return self.major.formatter
def get_minor_formatter(self):
'Get the formatter of the minor ticker'
return self.minor.formatter
def get_major_ticks(self, numticks=None):
'get the tick instances; grow as necessary'
if numticks is None:
numticks = len(self.get_major_locator()())
if len(self.majorTicks) < numticks:
# update the new tick label properties from the old
for i in range(numticks - len(self.majorTicks)):
tick = self._get_tick(major=True)
self.majorTicks.append(tick)
if self._lastNumMajorTicks < numticks:
protoTick = self.majorTicks[0]
for i in range(self._lastNumMajorTicks, len(self.majorTicks)):
tick = self.majorTicks[i]
if self._gridOnMajor: tick.gridOn = True
self._copy_tick_props(protoTick, tick)
self._lastNumMajorTicks = numticks
ticks = self.majorTicks[:numticks]
return ticks
def get_minor_ticks(self, numticks=None):
'get the minor tick instances; grow as necessary'
if numticks is None:
numticks = len(self.get_minor_locator()())
if len(self.minorTicks) < numticks:
# update the new tick label properties from the old
for i in range(numticks - len(self.minorTicks)):
tick = self._get_tick(major=False)
self.minorTicks.append(tick)
if self._lastNumMinorTicks < numticks:
protoTick = self.minorTicks[0]
for i in range(self._lastNumMinorTicks, len(self.minorTicks)):
tick = self.minorTicks[i]
if self._gridOnMinor: tick.gridOn = True
self._copy_tick_props(protoTick, tick)
self._lastNumMinorTicks = numticks
ticks = self.minorTicks[:numticks]
return ticks
def grid(self, b=None, which='major', **kwargs):
"""
Set the axis grid on or off; *b* is a boolean. Use *which* =
'major' | 'minor' to set the grid for major or minor ticks.
If *b* is *None* and len(kwargs)==0, toggle the grid state. If
*kwargs* are supplied, it is assumed you want the grid on and *b*
will be set to True.
*kwargs* are used to set the line properties of the grids, e.g.,
xax.grid(color='r', linestyle='-', linewidth=2)
"""
if len(kwargs): b = True
if which.lower().find('minor')>=0:
if b is None: self._gridOnMinor = not self._gridOnMinor
else: self._gridOnMinor = b
for tick in self.minorTicks: # don't use get_ticks here!
if tick is None: continue
tick.gridOn = self._gridOnMinor
if len(kwargs): artist.setp(tick.gridline,**kwargs)
else:
if b is None: self._gridOnMajor = not self._gridOnMajor
else: self._gridOnMajor = b
for tick in self.majorTicks: # don't use get_ticks here!
if tick is None: continue
tick.gridOn = self._gridOnMajor
if len(kwargs): artist.setp(tick.gridline,**kwargs)
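# Illustrative usage sketch (assumes a matplotlib Axes instance ``ax``):
#     ax.xaxis.grid(True)                         # major grid on
#     ax.xaxis.grid(True, which='minor')          # minor grid on
#     ax.xaxis.grid(color='r', linestyle=':')     # kwargs imply b=True
#     ax.xaxis.grid()                             # no args/kwargs: toggle state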
def update_units(self, data):
"""
introspect *data* for units converter and update the
axis.converter instance if necessary. Return *True* if *data* is
registered for unit conversion
"""
converter = munits.registry.get_converter(data)
if converter is None: return False
self.converter = converter
default = self.converter.default_units(data)
#print 'update units: default="%s", units=%s"'%(default, self.units)
if default is not None and self.units is None:
self.set_units(default)
self._update_axisinfo()
return True
def _update_axisinfo(self):
"""
check the axis converter for the stored units to see if the
axis info needs to be updated
"""
if self.converter is None:
return
info = self.converter.axisinfo(self.units)
if info is None:
return
if info.majloc is not None and self.major.locator!=info.majloc:
self.set_major_locator(info.majloc)
if info.minloc is not None and self.minor.locator!=info.minloc:
self.set_minor_locator(info.minloc)
if info.majfmt is not None and self.major.formatter!=info.majfmt:
self.set_major_formatter(info.majfmt)
if info.minfmt is not None and self.minor.formatter!=info.minfmt:
self.set_minor_formatter(info.minfmt)
if info.label is not None:
label = self.get_label()
label.set_text(info.label)
def have_units(self):
return self.converter is not None or self.units is not None
def convert_units(self, x):
if self.converter is None:
self.converter = munits.registry.get_converter(x)
if self.converter is None:
#print 'convert_units returning identity: units=%s, converter=%s'%(self.units, self.converter)
return x
ret = self.converter.convert(x, self.units)
#print 'convert_units converting: axis=%s, units=%s, converter=%s, in=%s, out=%s'%(self, self.units, self.converter, x, ret)
return ret
def set_units(self, u):
"""
set the units for axis
ACCEPTS: a units tag
"""
pchanged = False
if u is None:
self.units = None
pchanged = True
else:
if u!=self.units:
self.units = u
#print 'setting units', self.converter, u, munits.registry.get_converter(u)
pchanged = True
if pchanged:
self._update_axisinfo()
self.callbacks.process('units')
self.callbacks.process('units finalize')
def get_units(self):
'return the units for axis'
return self.units
def set_major_formatter(self, formatter):
"""
Set the formatter of the major ticker
ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
"""
self.major.formatter = formatter
formatter.set_axis(self)
def set_minor_formatter(self, formatter):
"""
Set the formatter of the minor ticker
ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
"""
self.minor.formatter = formatter
formatter.set_axis(self)
def set_major_locator(self, locator):
"""
Set the locator of the major ticker
ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
"""
self.major.locator = locator
locator.set_axis(self)
def set_minor_locator(self, locator):
"""
Set the locator of the minor ticker
ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
"""
self.minor.locator = locator
locator.set_axis(self)
def set_pickradius(self, pickradius):
"""
Set the depth of the axis used by the picker
ACCEPTS: a distance in points
"""
self.pickradius = pickradius
def set_ticklabels(self, ticklabels, *args, **kwargs):
"""
Set the text values of the tick labels. Return a list of Text
instances. Use *kwarg* *minor=True* to select minor ticks.
ACCEPTS: sequence of strings
"""
#ticklabels = [str(l) for l in ticklabels]
minor = kwargs.pop('minor', False)
if minor:
self.set_minor_formatter(mticker.FixedFormatter(ticklabels))
ticks = self.get_minor_ticks()
else:
self.set_major_formatter( mticker.FixedFormatter(ticklabels) )
ticks = self.get_major_ticks()
ret = []
for i, tick in enumerate(ticks):
if i<len(ticklabels):
tick.label1.set_text(ticklabels[i])
ret.append(tick.label1)
tick.label1.update(kwargs)
return ret
def set_ticks(self, ticks, minor=False):
"""
Set the locations of the tick marks from sequence ticks
ACCEPTS: sequence of floats
"""
### XXX if the user changes units, the information will be lost here
ticks = self.convert_units(ticks)
if len(ticks) > 1:
xleft, xright = self.get_view_interval()
if xright > xleft:
self.set_view_interval(min(ticks), max(ticks))
else:
self.set_view_interval(max(ticks), min(ticks))
if minor:
self.set_minor_locator(mticker.FixedLocator(ticks))
return self.get_minor_ticks(len(ticks))
else:
self.set_major_locator( mticker.FixedLocator(ticks) )
return self.get_major_ticks(len(ticks))
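# Illustrative usage sketch (assumes a matplotlib Axes instance ``ax``):
# fixed locations are installed via a FixedLocator and labels via a
# FixedFormatter, e.g.
#     ax.xaxis.set_ticks([0, 1, 2, 3])
#     ax.xaxis.set_ticklabels(['a', 'b', 'c', 'd'])
#     ax.xaxis.set_ticks([0.5, 1.5, 2.5], minor=True)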
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
raise NotImplementedError('Derived must override')
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset text position based on the sequence of bounding
boxes of all the ticklabels
"""
raise NotImplementedError('Derived must override')
def pan(self, numsteps):
'Pan *numsteps* (can be positive or negative)'
self.major.locator.pan(numsteps)
def zoom(self, direction):
"Zoom in/out on axis; if *direction* is >0 zoom in, else zoom out"
self.major.locator.zoom(direction)
class XAxis(Axis):
__name__ = 'xaxis'
axis_name = 'x'
def contains(self,mouseevent):
"""Test whether the mouse event occured in the x axis.
"""
if callable(self._contains): return self._contains(self,mouseevent)
x,y = mouseevent.x,mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes,yaxes = trans.transform_point((x,y))
except ValueError:
return False, {}
l,b = self.axes.transAxes.transform_point((0,0))
r,t = self.axes.transAxes.transform_point((1,1))
inaxis = xaxes>=0 and xaxes<=1 and (
(y<b and y>b-self.pickradius) or
(y>t and y<t+self.pickradius))
return inaxis, {}
def _get_tick(self, major):
return XTick(self.axes, 0, '', major=major)
def _get_label(self):
# x in axes coords, y in display coords (to be updated at draw
# time by _update_label_positions)
label = mtext.Text(x=0.5, y=0,
fontproperties = font_manager.FontProperties(size=rcParams['axes.labelsize']),
color = rcParams['axes.labelcolor'],
verticalalignment='top',
horizontalalignment='center',
)
label.set_transform( mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform() ))
self._set_artist_props(label)
self.label_position='bottom'
return label
def _get_offset_text(self):
# x in axes coords, y in display coords (to be updated at draw time)
offsetText = mtext.Text(x=1, y=0,
fontproperties = font_manager.FontProperties(size=rcParams['xtick.labelsize']),
color = rcParams['xtick.color'],
verticalalignment='top',
horizontalalignment='right',
)
offsetText.set_transform( mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform() ))
self._set_artist_props(offsetText)
self.offset_text_position='bottom'
return offsetText
def get_label_position(self):
"""
Return the label position (top or bottom)
"""
return self.label_position
def set_label_position(self, position):
"""
Set the label position (top or bottom)
ACCEPTS: [ 'top' | 'bottom' ]
"""
assert position == 'top' or position == 'bottom'
if position == 'top':
self.label.set_verticalalignment('bottom')
else:
self.label.set_verticalalignment('top')
self.label_position=position
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
if not self._autolabelpos: return
x,y = self.label.get_position()
if self.label_position == 'bottom':
if not len(bboxes):
bottom = self.axes.bbox.ymin
else:
bbox = mtransforms.Bbox.union(bboxes)
bottom = bbox.y0
self.label.set_position( (x, bottom - self.LABELPAD*self.figure.dpi / 72.0))
else:
if not len(bboxes2):
top = self.axes.bbox.ymax
else:
bbox = mtransforms.Bbox.union(bboxes2)
top = bbox.y1
self.label.set_position( (x, top+self.LABELPAD*self.figure.dpi / 72.0))
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x,y = self.offsetText.get_position()
if not len(bboxes):
bottom = self.axes.bbox.ymin
else:
bbox = mtransforms.Bbox.union(bboxes)
bottom = bbox.y0
self.offsetText.set_position((x, bottom-self.OFFSETTEXTPAD*self.figure.dpi/72.0))
def get_text_heights(self, renderer):
"""
Returns the amount of space one should reserve for text
above and below the axes. Returns a tuple (above, below)
"""
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
above = 0.0
if bbox2.height:
above += bbox2.height + padPixels
below = 0.0
if bbox.height:
below += bbox.height + padPixels
if self.get_label_position() == 'top':
above += self.label.get_window_extent(renderer).height + padPixels
else:
below += self.label.get_window_extent(renderer).height + padPixels
return above, below
def set_ticks_position(self, position):
"""
Set the ticks position (top, bottom, both, default or none).
'both' sets the ticks to appear on both positions, but does not
change the tick labels. 'default' resets the tick positions to
the default: ticks on both positions, labels at the bottom. 'none'
can be used if you don't want any ticks.
ACCEPTS: [ 'top' | 'bottom' | 'both' | 'default' | 'none' ]
"""
assert position in ('top', 'bottom', 'both', 'default', 'none')
ticks = list( self.get_major_ticks() ) # a copy
ticks.extend( self.get_minor_ticks() )
if position == 'top':
for t in ticks:
t.tick1On = False
t.tick2On = True
t.label1On = False
t.label2On = True
elif position == 'bottom':
for t in ticks:
t.tick1On = True
t.tick2On = False
t.label1On = True
t.label2On = False
elif position == 'default':
for t in ticks:
t.tick1On = True
t.tick2On = True
t.label1On = True
t.label2On = False
elif position == 'none':
for t in ticks:
t.tick1On = False
t.tick2On = False
else:
for t in ticks:
t.tick1On = True
t.tick2On = True
for t in ticks:
t.update_position(t._loc)
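# Illustrative usage sketch (assumes a matplotlib Axes instance ``ax``):
#     ax.xaxis.set_ticks_position('top')     # ticks and labels on top only
#     ax.xaxis.set_ticks_position('both')    # ticks on both sides, labels unchanged
#     ax.xaxis.tick_bottom()                 # shorthand for ...('bottom')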
def tick_top(self):
'use ticks only on top'
self.set_ticks_position('top')
def tick_bottom(self):
'use ticks only on bottom'
self.set_ticks_position('bottom')
def get_ticks_position(self):
"""
Return the ticks position (top, bottom, default or unknown)
"""
majt=self.majorTicks[0]
mT=self.minorTicks[0]
majorTop=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2On
minorTop=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2On
if majorTop and minorTop: return 'top'
majorBottom=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)
minorBottom=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)
if majorBottom and minorBottom: return 'bottom'
majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)
minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)
if majorDefault and minorDefault: return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.axes.viewLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposx
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervalx
def set_data_interval(self, vmin, vmax, ignore=False):
'Set the axis data limits'
if ignore:
self.axes.dataLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
class YAxis(Axis):
__name__ = 'yaxis'
axis_name = 'y'
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the y axis.
Returns *True* | *False*
"""
if callable(self._contains): return self._contains(self,mouseevent)
x,y = mouseevent.x,mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes,yaxes = trans.transform_point((x,y))
except ValueError:
return False, {}
l,b = self.axes.transAxes.transform_point((0,0))
r,t = self.axes.transAxes.transform_point((1,1))
inaxis = yaxes>=0 and yaxes<=1 and (
(x<l and x>l-self.pickradius) or
(x>r and x<r+self.pickradius))
return inaxis, {}
def _get_tick(self, major):
return YTick(self.axes, 0, '', major=major)
def _get_label(self):
# x in display coords (updated by _update_label_position)
# y in axes coords
label = mtext.Text(x=0, y=0.5,
# todo: get the label position
fontproperties=font_manager.FontProperties(size=rcParams['axes.labelsize']),
color = rcParams['axes.labelcolor'],
verticalalignment='center',
horizontalalignment='right',
rotation='vertical',
)
label.set_transform( mtransforms.blended_transform_factory(
mtransforms.IdentityTransform(), self.axes.transAxes) )
self._set_artist_props(label)
self.label_position='left'
return label
def _get_offset_text(self):
# x in display coords, y in axes coords (to be updated at draw time)
offsetText = mtext.Text(x=0, y=0.5,
fontproperties = font_manager.FontProperties(size=rcParams['ytick.labelsize']),
color = rcParams['ytick.color'],
verticalalignment = 'bottom',
horizontalalignment = 'left',
)
offsetText.set_transform(mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform()) )
self._set_artist_props(offsetText)
self.offset_text_position='left'
return offsetText
def get_label_position(self):
"""
Return the label position (left or right)
"""
return self.label_position
def set_label_position(self, position):
"""
Set the label position (left or right)
ACCEPTS: [ 'left' | 'right' ]
"""
assert position == 'left' or position == 'right'
if position == 'right':
self.label.set_horizontalalignment('left')
else:
self.label.set_horizontalalignment('right')
self.label_position=position
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
if not self._autolabelpos: return
x,y = self.label.get_position()
if self.label_position == 'left':
if not len(bboxes):
left = self.axes.bbox.xmin
else:
bbox = mtransforms.Bbox.union(bboxes)
left = bbox.x0
self.label.set_position( (left-self.LABELPAD*self.figure.dpi/72.0, y))
else:
if not len(bboxes2):
right = self.axes.bbox.xmax
else:
bbox = mtransforms.Bbox.union(bboxes2)
right = bbox.x1
self.label.set_position( (right+self.LABELPAD*self.figure.dpi/72.0, y))
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x,y = self.offsetText.get_position()
top = self.axes.bbox.ymax
self.offsetText.set_position((x, top+self.OFFSETTEXTPAD*self.figure.dpi/72.0))
def set_offset_position(self, position):
assert position == 'left' or position == 'right'
x,y = self.offsetText.get_position()
if position == 'left': x = 0
else: x = 1
self.offsetText.set_ha(position)
self.offsetText.set_position((x,y))
def get_text_widths(self, renderer):
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
left = 0.0
if bbox.width:
left += bbox.width + padPixels
right = 0.0
if bbox2.width:
right += bbox2.width + padPixels
if self.get_label_position() == 'left':
left += self.label.get_window_extent(renderer).width + padPixels
else:
right += self.label.get_window_extent(renderer).width + padPixels
return left, right
def set_ticks_position(self, position):
"""
Set the ticks position (left, right, both, default or none).
'both' sets the ticks to appear on both positions, but
does not change the tick labels.
'default' resets the tick positions to the default:
ticks on both positions, labels on the left.
ACCEPTS: [ 'left' | 'right' | 'both' | 'default' | 'none' ]
"""
assert position in ('left', 'right', 'both', 'default', 'none')
ticks = list( self.get_major_ticks() ) # a copy
ticks.extend( self.get_minor_ticks() )
if position == 'right':
self.set_offset_position('right')
for t in ticks:
t.tick1On = False
t.tick2On = True
t.label1On = False
t.label2On = True
elif position == 'left':
self.set_offset_position('left')
for t in ticks:
t.tick1On = True
t.tick2On = False
t.label1On = True
t.label2On = False
elif position == 'default':
self.set_offset_position('left')
for t in ticks:
t.tick1On = True
t.tick2On = True
t.label1On = True
t.label2On = False
elif position == 'none':
for t in ticks:
t.tick1On = False
t.tick2On = False
else:
self.set_offset_position('left')
for t in ticks:
t.tick1On = True
t.tick2On = True
def tick_right(self):
'use ticks only on right'
self.set_ticks_position('right')
def tick_left(self):
'use ticks only on left'
self.set_ticks_position('left')
def get_ticks_position(self):
"""
Return the ticks position (left, right, default or unknown)
"""
majt=self.majorTicks[0]
mT=self.minorTicks[0]
majorRight=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2On
minorRight=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2On
if majorRight and minorRight: return 'right'
majorLeft=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)
minorLeft=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)
if majorLeft and minorLeft: return 'left'
majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)
minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)
if majorDefault and minorDefault: return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervaly
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.axes.viewLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposy
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervaly
def set_data_interval(self, vmin, vmax, ignore=False):
'Set the axis data limits'
if ignore:
self.axes.dataLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
| agpl-3.0 |
Iolaum/Phi1337 | scripts/pipeline/a03a_score_matrix.py | 1 | 3149 | from __future__ import division
import pickle
import pandas as pd
import numpy as np
import os
from a02a_word_count_evaluation import clean_text
from a01c_feature_engineering import tokenize_and_stem
def map_sets_to_rates(set_name):
sets_rates = {
'title_set': 'title_rate',
'descr_set': 'desc_rate',
'attr_set': 'attr_rate',
}
return sets_rates[set_name]
def create_score_dataframe():
# # step 1
# Load bow from previous script
bow_matrix = pd.read_pickle('../../dataset/bow_per_product.pickle')
# print(bow_matrix)
# # Debug
# print(bow_matrix)
# # step 2
# iterate over training data to create the score dataframe
training_data = pd.read_csv('../../dataset/preprocessed_training_data.csv')
# training_data['search_term'] = training_data['search_term'].fillna(" ")
# index = training_data['search_term'].index[training_data['search_term'].apply(np.isnan)]
# print(index)
# # debug prints
# print(bow_matrix)
# print(training_data)
all_feature_names = ['title_rate', 'desc_rate', 'attr_rate', 'relevance']
score_df = pd.DataFrame(
columns=all_feature_names,
index=training_data['id'].tolist()
)
counter = 0
for isearch in training_data.iterrows():
search_text = isearch[1].search_term
try:
search_term_set = set(search_text.split())
except AttributeError:
# NaN/missing search terms have no .split(); report them and fall back
# to an empty set so the row still gets a (zero) score
print(search_text)
search_term_set = set()
# # debug
# print search_term_set
# get p_id, search_id and relevance from tr_data
p_id = isearch[1].product_uid
np_pid = np.int64(p_id)
relevance = isearch[1].relevance
search_id = isearch[1].id
# query the bow_matrix
sets = {
'title_set': set(bow_matrix.ix[np_pid, 'title']),
'descr_set': set(bow_matrix.ix[np_pid, 'description']),
'attr_set': set(bow_matrix.ix[np_pid, 'attributes']),
}
# # debug prints
# print("")
# print p_id
# print relevance
# print sets['title_set']
# print sets['descr_set']
# print sets['attr_set']
# print search_term_set
# Instantiate each df row
score_row = {
'relevance': relevance
}
for set_name, iset in sets.iteritems():
score = calculate_field_score(iset, search_term_set)
col_name = map_sets_to_rates(set_name)
score_row[col_name] = score
score_df.loc[search_id] = pd.Series(score_row)
if (counter % 1000) == 0:
print ("Succesfully created " + str(counter) + " rows")
counter += 1
# # Debug
# print(score_df)
print(score_df.shape)
score_df.to_pickle('../../dataset/score_df.pickle')
print("Score Dataframe succesfully saved!")
def calculate_field_score(field_set, search_set):
comset = field_set & search_set
try:
return len(comset) / len(search_set)
except ZeroDivisionError:
print("Division Error occured")
return len(comset)
if __name__ == "__main__":
create_score_dataframe()
| apache-2.0 |
brainstorm/bcbio-nextgen | bcbio/graph/graph.py | 2 | 15188 | from __future__ import print_function
from datetime import datetime
import collections
import functools
import os
import gzip
import pytz
import re
import socket
import pandas as pd
import cPickle as pickle
from bcbio import utils
from bcbio.graph.collectl import load_collectl
mpl = utils.LazyImport("matplotlib")
plt = utils.LazyImport("matplotlib.pyplot")
pylab = utils.LazyImport("pylab")
def _setup_matplotlib():
# plt.style.use('ggplot')
mpl.use('Agg')
pylab.rcParams['image.cmap'] = 'viridis'
pylab.rcParams['figure.figsize'] = (35.0, 12.0)
# pylab.rcParams['figure.figsize'] = (100, 100)
pylab.rcParams['figure.dpi'] = 300
pylab.rcParams['font.size'] = 25
def get_bcbio_nodes(path):
"""Fetch the local nodes (-c local) that contain collectl files from
the bcbio log file.
:returns: A dict keyed by the unique (non-FQDN) local hostnames
where collectl raw logs can be found.
"""
with open(path, 'r') as file_handle:
hosts = collections.defaultdict(dict)
for line in file_handle:
matches = re.search(r'\]\s([^:]+):', line)
if not matches:
continue
# Format of the record will be "[Date] host: Timing: Step" if distributed,
# otherwise the host will be missing and it means its a local run, we can stop
elif 'Timing: ' in line and line.split(': ')[1] != 'Timing':
hosts = collections.defaultdict(dict, {socket.gethostname() : {}})
break
hosts[matches.group(1)]
return hosts
def get_bcbio_timings(path):
"""Fetch timing information from a bcbio log file."""
with open(path, 'r') as file_handle:
steps = {}
for line in file_handle:
matches = re.search(r'^\[([^\]]+)\] ([^:]+: .*)', line)
if not matches:
continue
tstamp = matches.group(1)
msg = matches.group(2)
# XXX: new special logs do not have this
#if not msg.find('Timing: ') >= 0:
# continue
when = datetime.strptime(tstamp, '%Y-%m-%dT%H:%MZ').replace(
tzinfo=pytz.timezone('UTC'))
step = msg.split(":")[-1].strip()
steps[when] = step
return steps
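# Illustrative sketch (the exact log line layout is assumed from the regex
# and strptime format above): a line such as
#     [2016-01-30T14:05Z] Timing: alignment
# yields one entry mapping the UTC datetime 2016-01-30 14:05 to the step
# name 'alignment' (the text after the last ':').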
def plot_inline_jupyter(plot):
""" Plots inside the output cell of a jupyter notebook if %matplotlib magic
is defined.
"""
_setup_matplotlib()
try:
get_ipython()
plt.show(plot)
except NameError:
pass
def this_and_prev(iterable):
"""Walk an iterable, returning the current and previous items
as a two-tuple."""
try:
item = next(iterable)
while True:
next_item = next(iterable)
yield item, next_item
item = next_item
except StopIteration:
return
def delta_from_prev(prev_values, tstamps, value):
try:
prev_val = next(prev_values)
cur_tstamp, prev_tstamp = next(tstamps)
except StopIteration:
return 0
# Take the difference from the previous value and divide by the interval
# since the previous sample, so we always return values in units/second.
return (prev_val - value) / (prev_tstamp - cur_tstamp).seconds
def calc_deltas(data_frame, series=None):
"""Many of collectl's data values are cumulative (monotonically
increasing), so subtract the previous value to determine the value
for the current interval.
"""
series = series or []
data_frame = data_frame.sort_index(ascending=True)
for s in series:
prev_values = iter(data_frame[s])
# Burn the first value, so the first row we call delta_from_prev()
# for gets its previous value from the second row in the series,
# and so on.
next(prev_values)
data_frame[s] = data_frame[s].apply(functools.partial(
delta_from_prev, iter(prev_values),
this_and_prev(iter(data_frame.index))))
return data_frame
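# Worked example (illustrative): with samples taken every 10 s,
#     index        cpu_user (cumulative counter)
#     t0           1000
#     t0 + 10 s    1500
#     t0 + 20 s    2100
# calc_deltas(df, ['cpu_user']) replaces each row with the forward difference
# to the next sample divided by the sampling interval, i.e. 50.0 and 60.0
# units/second here; the last row has no successor and falls back to 0.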
def remove_outliers(series, stddev):
"""Remove the outliers from a series."""
return series[(series - series.mean()).abs() < stddev * series.std()]
def prep_for_graph(data_frame, series=None, delta_series=None, smoothing=None,
outlier_stddev=None):
"""Prepare a dataframe for graphing by calculating deltas for
series that need them, resampling, and removing outliers.
"""
series = series or []
delta_series = delta_series or []
graph = calc_deltas(data_frame, delta_series)
for s in series + delta_series:
if smoothing:
graph[s] = graph[s].resample(smoothing)
if outlier_stddev:
graph[s] = remove_outliers(graph[s], outlier_stddev)
return graph[series + delta_series]
def add_common_plot_features(plot, steps):
"""Add plot features common to all plots, such as bcbio step
information.
"""
_setup_matplotlib()
plot.yaxis.set_tick_params(labelright=True)
plot.set_xlabel('')
ymax = plot.get_ylim()[1]
ticks = {}
for tstamp, step in steps.items():
if step == 'finished':
continue
plot.vlines(tstamp, 0, ymax, linestyles='dashed')
tstamp = mpl.dates.num2epoch(mpl.dates.date2num(tstamp))
ticks[tstamp] = step
tick_kvs = sorted(ticks.items())
top_axis = plot.twiny()
top_axis.set_xlim(*plot.get_xlim())
top_axis.set_xticks([k for k, v in tick_kvs])
top_axis.set_xticklabels([v for k, v in tick_kvs],
rotation=45, ha='left', size=pylab.rcParams['font.size'])
plot.set_ylim(0)
return plot
def graph_cpu(data_frame, steps, num_cpus):
graph = prep_for_graph(
data_frame, delta_series=['cpu_user', 'cpu_sys', 'cpu_wait'])
graph['cpu_user'] /= 100.0
graph['cpu_sys'] /= 100.0
graph['cpu_wait'] /= 100.0
plot = graph.plot()
plot.set_ylabel('CPU core usage')
plot.set_ylim(0, num_cpus)
add_common_plot_features(plot, steps)
plot_inline_jupyter(plot)
return plot, graph
def graph_net_bytes(data_frame, steps, ifaces):
series = []
for iface in ifaces:
series.extend(['{}_rbyte'.format(iface), '{}_tbyte'.format(iface)])
graph = prep_for_graph(data_frame, delta_series=series)
for iface in ifaces:
old_series = '{}_rbyte'.format(iface)
new_series = '{}_receive'.format(iface)
graph[new_series] = graph[old_series] * 8 / 1024 / 1024
del graph[old_series]
old_series = '{}_tbyte'.format(iface)
new_series = '{}_transmit'.format(iface)
graph[new_series] = graph[old_series] * 8 / 1024 / 1024
del graph[old_series]
plot = graph.plot()
plot.set_ylabel('mbits/s')
plot.set_ylim(0, 2000)
add_common_plot_features(plot, steps)
plot_inline_jupyter(plot)
return plot, graph
def graph_net_pkts(data_frame, steps, ifaces):
series = []
for iface in ifaces:
series.extend(['{}_rpkt'.format(iface), '{}_tpkt'.format(iface)])
graph = prep_for_graph(data_frame, delta_series=series)
plot = graph.plot()
plot.set_ylabel('packets/s')
add_common_plot_features(plot, steps)
plot_inline_jupyter(plot)
return plot, graph
def graph_memory(data_frame, steps, total_mem):
graph = prep_for_graph(
data_frame, series=['mem_total', 'mem_free', 'mem_buffers',
'mem_cached'])
free_memory = graph['mem_free'] + graph['mem_buffers'] + \
graph['mem_cached']
graph = (graph['mem_total'] - free_memory) / 1024 / 1024
plot = graph.plot()
plot.set_ylabel('gbytes')
plot.set_ylim(0, total_mem)
add_common_plot_features(plot, steps)
plot_inline_jupyter(plot)
return plot, graph
def graph_disk_io(data_frame, steps, disks):
series = []
for disk in disks:
series.extend([
'{}_sectors_read'.format(disk),
'{}_sectors_written'.format(disk),
])
graph = prep_for_graph(data_frame, delta_series=series, outlier_stddev=2)
for disk in disks:
old_series = '{}_sectors_read'.format(disk)
new_series = '{}_read'.format(disk)
graph[new_series] = graph[old_series] * 512 / 1024 / 1024
del graph[old_series]
old_series = '{}_sectors_written'.format(disk)
new_series = '{}_write'.format(disk)
graph[new_series] = graph[old_series] * 512 / 1024 / 1024
del graph[old_series]
plot = graph.plot()
plot.set_ylabel('mbytes/s')
add_common_plot_features(plot, steps)
plot_inline_jupyter(plot)
return plot, graph
def log_time_frame(bcbio_log):
"""The bcbio running time frame.
:return: an instance of :class collections.namedtuple:
with the following fields: start and end
"""
output = collections.namedtuple("Time", ["start", "end", "steps"])
bcbio_timings = get_bcbio_timings(bcbio_log)
return output(min(bcbio_timings), max(bcbio_timings), bcbio_timings)
def rawfile_within_timeframe(rawfile, timeframe):
""" Checks whether the given raw filename timestamp falls within [start, end] timeframe.
"""
matches = re.search(r'-(\d{8})-', rawfile)
if matches:
ftime = datetime.strptime(matches.group(1), "%Y%m%d")
ftime = pytz.utc.localize(ftime)
return ftime.date() >= timeframe[0].date() and ftime.date() <= timeframe[1].date()
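# Illustrative sketch (filename layout inferred from the regexes used here
# and in resource_usage): collectl raw files are expected to look like
#     nodename-20160130-000000.raw.gz
# so rawfile_within_timeframe(that_name, (start, end)) is True exactly when
# 2016-01-30 falls inside [start.date(), end.date()].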
def resource_usage(bcbio_log, cluster, rawdir, verbose):
"""Generate system statistics from bcbio runs.
Parse the obtained files and put the information in
a :class pandas.DataFrame:.
:param bcbio_log: local path to bcbio log file written by the run
:param cluster:
:param rawdir: directory to put raw data files
:param verbose: increase verbosity
:return: a tuple with three dictionaries, the first one contains
an instance of :pandas.DataFrame: for each host, the second one
contains information regarding the hardware configuration and
the last one contains information regarding timing.
:type return: tuple
"""
data_frames = {}
hardware_info = {}
time_frame = log_time_frame(bcbio_log)
for collectl_file in sorted(os.listdir(rawdir)):
if not collectl_file.endswith('.raw.gz'):
continue
# Only load filenames within sampling timerange (gathered from bcbio_log time_frame)
if rawfile_within_timeframe(collectl_file, time_frame):
collectl_path = os.path.join(rawdir, collectl_file)
data, hardware = load_collectl(
collectl_path, time_frame.start, time_frame.end)
if len(data) == 0:
#raise ValueError("No data present in collectl file %s, mismatch in timestamps between raw collectl and log file?", collectl_path)
continue
host = re.sub(r'-\d{8}-\d{6}\.raw\.gz$', '', collectl_file)
hardware_info[host] = hardware
if host not in data_frames:
data_frames[host] = data
else:
data_frames[host] = pd.concat([data_frames[host], data])
return (data_frames, hardware_info, time_frame.steps)
def generate_graphs(data_frames, hardware_info, steps, outdir,
verbose=False):
"""Generate all graphs for a bcbio run."""
_setup_matplotlib()
# Hash of hosts containing (data, hardware, steps) tuple
collectl_info = collections.defaultdict(dict)
for host, data_frame in data_frames.items():
if verbose:
print('Generating CPU graph for {}...'.format(host))
graph, data_cpu = graph_cpu(data_frame, steps, hardware_info[host]['num_cpus'])
graph.get_figure().savefig(
os.path.join(outdir, '{}_cpu.png'.format(host)),
bbox_inches='tight', pad_inches=0.25)
pylab.close()
ifaces = set([series.split('_')[0]
for series in data_frame.keys()
if series.startswith(('eth', 'ib'))])
if verbose:
print('Generating network graphs for {}...'.format(host))
graph, data_net_bytes = graph_net_bytes(data_frame, steps, ifaces)
graph.get_figure().savefig(
os.path.join(outdir, '{}_net_bytes.png'.format(host)),
bbox_inches='tight', pad_inches=0.25)
pylab.close()
graph, data_net_pkts = graph_net_pkts(data_frame, steps, ifaces)
graph.get_figure().savefig(
os.path.join(outdir, '{}_net_pkts.png'.format(host)),
bbox_inches='tight', pad_inches=0.25)
pylab.close()
if verbose:
print('Generating memory graph for {}...'.format(host))
graph, data_mem = graph_memory(data_frame, steps, hardware_info[host]["memory"])
graph.get_figure().savefig(
os.path.join(outdir, '{}_memory.png'.format(host)),
bbox_inches='tight', pad_inches=0.25)
pylab.close()
if verbose:
print('Generating storage I/O graph for {}...'.format(host))
drives = set([
series.split('_')[0]
for series in data_frame.keys()
if series.startswith(('sd', 'vd', 'hd', 'xvd'))
])
graph, data_disk = graph_disk_io(data_frame, steps, drives)
graph.get_figure().savefig(
os.path.join(outdir, '{}_disk_io.png'.format(host)),
bbox_inches='tight', pad_inches=0.25)
pylab.close()
print('Serializing output to pickle object for node {}...'.format(host))
# "Clean" dataframes ready to be plotted
collectl_info[host] = { "hardware": hardware_info,
"steps": steps, "cpu": data_cpu, "mem": data_mem,
"disk": data_disk, "net_bytes": data_net_bytes,
"net_pkts": data_net_pkts
}
return collectl_info
def serialize_plot_data(collectl_info, pre_graph_info, outdir, fname="collectl_info.pickle.gz"):
# Useful to regenerate and slice graphs quickly and/or inspect locally
collectl_pickle = os.path.join(outdir, fname)
print("Saving plot pickle file with all hosts on: {}".format(collectl_pickle))
with gzip.open(collectl_pickle, "wb") as f:
pickle.dump((collectl_info, pre_graph_info), f)
def add_subparser(subparsers):
parser = subparsers.add_parser(
"graph",
help=("Generate system graphs (CPU/memory/network/disk I/O "
"consumption) from bcbio runs"))
parser.add_argument(
"log",
help="Local path to bcbio log file written by the run.")
parser.add_argument(
"-o", "--outdir", default="monitoring/graphs",
help="Directory to write graphs to.")
parser.add_argument(
"-r", "--rawdir", default="monitoring/collectl", required=True,
help="Directory to put raw collectl data files.")
parser.add_argument(
"-v", "--verbose", action="store_true", default=False,
help="Emit verbose output")
return parser
| mit |
PPKE-Bioinf/consensx.itk.ppke.hu | consensx/calc/noe_violations.py | 1 | 7120 | import math
import matplotlib.pyplot as plt
import numpy as np
from .vec_3d import Vec3D
def make_noe_hist(my_path, violations):
plt.figure(figsize=(6, 5), dpi=80)
n_groups = len(violations)
means_men = [
violations['0-0.5'], violations['0.5-1'], violations['1-1.5'],
violations['1.5-2'], violations['2-2.5'], violations['2.5-3'],
violations['3<']
]
ticks = ['0-0.5', '0.5-1', '1-1.5', '1.5-2', '2-2.5', '2.5-3', '3<']
index = np.arange(n_groups)
bar_width = 0.7
plt.bar(index, means_men, bar_width, alpha=.7, color='b')
plt.xlabel("Violation (Å)")
plt.ylabel("# of NOE distance violations")
plt.title("NOE distance violations")
plt.xticks(index + bar_width / 2, ticks)
ax = plt.axes()
ax.yaxis.grid()
plt.tight_layout()
plt.savefig(my_path + "/NOE_hist.svg", format="svg")
plt.close()
def pdb2coords(model_data):
"""Loads PDB coordinates into a dictionary, per model"""
prev_resnum = -1
pdb_coords = {}
for i in range(model_data.coordsets):
model_data.atomgroup.setACSIndex(i)
pdb_coords[i] = {}
for atom in model_data.atomgroup:
resnum = int(atom.getResnum())
name = str(atom.getName())
if resnum == prev_resnum:
pdb_coords[i][resnum][name] = Vec3D(atom.getCoords())
else:
pdb_coords[i][resnum] = {}
pdb_coords[i][resnum][name] = Vec3D(atom.getCoords())
prev_resnum = resnum
return pdb_coords
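# Illustrative sketch of the returned nesting,
# model index -> residue number -> atom name -> Vec3D:
#     coords = pdb2coords(model_data)
#     coords[0][42]['HA']    # coordinates of atom HA of residue 42 in model 0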
def noe_violations(model_data, my_path, db_entry, noe_restraints, bme_weights):
"""Back calculate NOE distance violations from given RDC lists and PDB
models"""
r3_averaging = db_entry.r3average
restraints = noe_restraints.resolved_restraints
pdb_coords = pdb2coords(model_data)
prev_id = -1
avg_distances = {}
all_distances = {}
measured_avg = {}
str_distances = {}
for model in list(pdb_coords.keys()):
avg_distances[model] = {}
all_distances[model] = {}
for restraint_num, restraint in enumerate(restraints):
rest_id = int(restraint["csx_id"])
resnum1 = restraint["seq_ID1"]
atom1 = restraint["atom_ID1"]
resnum2 = restraint["seq_ID2"]
atom2 = restraint["atom_ID2"]
atom_coord1 = pdb_coords[model][resnum1][atom1]
atom_coord2 = pdb_coords[model][resnum2][atom2]
distance = (atom_coord1 - atom_coord2).magnitude()
all_distances[model][restraint_num] = distance
if prev_id == rest_id:
avg_distances[model][rest_id].append(distance)
else:
prev_id = rest_id
avg_distances[model][rest_id] = []
str_distaces[rest_id] = restraint["dist_max"]
avg_distances[model][rest_id].append(distance)
for restraint_num, restraint in enumerate(restraints):
rest_id = int(restraint["csx_id"])
resnum1 = restraint["seq_ID1"]
atom1 = restraint["atom_ID1"]
resnum2 = restraint["seq_ID2"]
atom2 = restraint["atom_ID2"]
dist_str = "> {} {} {} {} {} | ".format(
rest_id, resnum1, atom1, resnum2, atom2
)
for model in list(pdb_coords.keys()):
dist_str += "{0:.2f} ".format(all_distances[model][restraint_num])
# print("DISTS", dist_str)
# at this point avg_distances[model][curr_id] contains distances for one
# model and one restraint GROUP identified with "csx_id" number
prev_id = -1
for model in list(pdb_coords.keys()):
for restraint in restraints:
curr_id = int(restraint["csx_id"])
if prev_id == curr_id:
continue
else:
prev_id = curr_id
avg = 0.0
for distance in avg_distances[model][curr_id]:
if r3_averaging:
avg += math.pow(float(distance), -3)
else:
avg += math.pow(float(distance), -6)
avg /= len(avg_distances[model][curr_id])
if r3_averaging:
avg_distances[model][curr_id] = math.pow(avg, -1.0/3)
else:
avg_distances[model][curr_id] = math.pow(avg, -1.0/6)
# at this point avg_distances[model][curr_id] contains a single (r-6)
# averaged distance for one model and one restraint GROUP identified with
# its "csx_id" number. Averaging is done over the distances within the GROUP
for restraint in restraints:
curr_id = int(restraint["curr_distID"])
avg = 0.0
if bme_weights:
for model in list(pdb_coords.keys()):
avg += math.pow(
avg_distances[model][curr_id], -6
) * bme_weights[model]
avg /= sum(bme_weights)
else:
for model in list(pdb_coords.keys()):
avg += math.pow(avg_distances[model][curr_id], -6)
avg /= len(list(pdb_coords.keys()))
measured_avg[curr_id] = math.pow(avg, -1.0/6)
bme_exp_filename = "noe_exp.dat"
bme_calc_filename = "noe_calc.dat"
with open(my_path + bme_exp_filename, "w") as exp_dat_file:
exp_dat_file.write("# DATA=NOE PRIOR=GAUSS POWER=6\n")
prev_id = -1
for restraint in restraints:
if prev_id == restraint["csx_id"]:
continue
prev_id = restraint["csx_id"]
exp_dat_file.write(
str(restraint["csx_id"]) + "\t" +
str(restraint["dist_max"]) + "\t0.1\n"
)
with open(my_path + bme_calc_filename, "w") as calc_dat_file:
for model in list(pdb_coords.keys()):
for i in avg_distances[model]:
calc_dat_file.write(
str(avg_distances[model][i]) + " "
)
calc_dat_file.write("\n")
# at this point measured_avg is a simple dictionary containing the
# model-averaged distance for each "csx_id" number
avg_dist_keys = list(measured_avg.keys())
avg_dist_keys.sort()
violations = {"0-0.5": 0, "0.5-1": 0, "1-1.5": 0,
"1.5-2": 0, "2-2.5": 0, "2.5-3": 0, "3<": 0}
viol_count = 0
for key in avg_dist_keys:
if measured_avg[key] > str_distances[key]:
viol_count += 1
diff = measured_avg[key] - str_distances[key]
if diff <= 0.5:
violations["0-0.5"] += 1
elif 0.5 < diff <= 1:
violations["0.5-1"] += 1
elif 1 < diff <= 1.5:
violations["1-1.5"] += 1
elif 1.5 < diff <= 2:
violations["1.5-2"] += 1
elif 2 < diff <= 2.5:
violations["2-2.5"] += 1
elif 2.5 < diff <= 3:
violations["2.5-3"] += 1
else:
violations["3<"] += 1
print("Total # of violations:", viol_count)
make_noe_hist(my_path, violations)
return viol_count
| mit |
rishizsinha/project-beta | code/utils/tests/test_glm.py | 4 | 4866 | import os
import sys
import numpy as np
from sklearn import linear_model as lm
import numpy.linalg as npl
import matplotlib.pyplot as plt
import nibabel as nib
from numpy.testing import assert_almost_equal, assert_array_equal
__file__ = os.getcwd()
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),"../utils/")))
from glm import *
convolved = np.array([ 0.00000000e+00, 2.32180231e-01, 8.32180231e-01,
1.14122301e+00, 1.13435859e+00, 1.03505758e+00,
9.61128472e-01, 9.26237076e-01, 9.13560288e-01,
9.09729788e-01, 9.08724361e-01, 9.08488491e-01,
9.08488491e-01, 6.76308260e-01, 7.63082603e-02,
-2.32734516e-01, -2.25870103e-01, -1.26569090e-01,
-5.26399807e-02, -1.77485851e-02, -5.07179715e-03,
-1.24129719e-03, -2.35869919e-04, 0.00000000e+00,
0.00000000e+00, 2.32180231e-01, 8.32180231e-01,
1.14122301e+00, 1.13435859e+00, 1.03505758e+00,
9.61128472e-01, 9.26237076e-01, 9.13560288e-01,
9.09729788e-01, 9.08724361e-01, 9.08488491e-01,
9.08488491e-01, 6.76308260e-01, 7.63082603e-02,
-2.32734516e-01, -2.25870103e-01, -1.26569090e-01,
-5.26399807e-02, -1.77485851e-02, -5.07179715e-03,
-1.24129719e-03, -2.35869919e-04, 0.00000000e+00,
0.00000000e+00, 2.32180231e-01, 8.32180231e-01,
1.14122301e+00, 1.13435859e+00, 1.03505758e+00,
9.61128472e-01, 9.26237076e-01, 9.13560288e-01,
9.09729788e-01, 9.08724361e-01, 9.08488491e-01,
9.08488491e-01, 6.76308260e-01, 7.63082603e-02,
-2.32734516e-01, -2.25870103e-01, -1.26569090e-01,
-5.26399807e-02, -1.77485851e-02, -5.07179715e-03,
-1.24129719e-03, -2.35869919e-04, 0.00000000e+00,
0.00000000e+00, 2.32180231e-01, 8.32180231e-01,
1.14122301e+00, 1.13435859e+00, 1.03505758e+00,
9.61128472e-01, 9.26237076e-01, 9.13560288e-01,
9.09729788e-01, 9.08724361e-01, 9.08488491e-01,
9.08488491e-01, 6.76308260e-01, 7.63082603e-02,
-2.32734516e-01, -2.25870103e-01, -1.26569090e-01,
-5.26399807e-02, -1.77485851e-02, -5.07179715e-03,
-1.24129719e-03, -2.35869919e-04, 0.00000000e+00,
0.00000000e+00, 2.32180231e-01, 8.32180231e-01,
1.14122301e+00, 1.13435859e+00, 1.03505758e+00,
9.61128472e-01, 9.26237076e-01, 9.13560288e-01,
9.09729788e-01, 9.08724361e-01, 9.08488491e-01,
9.08488491e-01, 6.76308260e-01, 7.63082603e-02,
-2.32734516e-01, -2.25870103e-01, -1.26569090e-01,
-5.26399807e-02, -1.77485851e-02, -5.07179715e-03,
-1.24129719e-03, -2.35869919e-04, 0.00000000e+00,
0.00000000e+00, 2.32180231e-01, 8.32180231e-01,
1.14122301e+00, 1.13435859e+00, 1.03505758e+00,
9.61128472e-01, 9.26237076e-01, 9.13560288e-01,
9.09729788e-01, 9.08724361e-01, 9.08488491e-01,
9.08488491e-01, 6.76308260e-01, 7.63082603e-02,
-2.32734516e-01, -2.25870103e-01, -1.26569090e-01,
-5.26399807e-02, -1.77485851e-02, -5.07179715e-03,
-1.24129719e-03, -2.35869919e-04, 0.00000000e+00,
0.00000000e+00, 2.32180231e-01, 8.32180231e-01,
1.14122301e+00, 1.13435859e+00, 1.03505758e+00,
9.61128472e-01, 9.26237076e-01, 9.13560288e-01,
9.09729788e-01, 9.08724361e-01, 9.08488491e-01,
9.08488491e-01, 6.76308260e-01, 7.63082603e-02,
-2.32734516e-01, -2.25870103e-01, -1.26569090e-01,
-5.26399807e-02, -1.77485851e-02, -5.07179715e-03,
-1.24129719e-03, -2.35869919e-04, 0.00000000e+00,
0.00000000e+00])
## Create image data
shape_3d = (64, 64, 30)
V = np.prod(shape_3d)
T = 169
arr_2d = np.random.normal(size=(V, T))
expected_stds = np.std(arr_2d, axis=0)
data = np.reshape(arr_2d, shape_3d + (T,))
def test_glm():
actual_design = np.ones((len(convolved), 2))
actual_design[:, 1] = convolved
    exp_design, exp_B_4d = glm(data, convolved)
assert_almost_equal(actual_design, exp_design)
def test_glm1():
actual_design = np.ones((len(convolved), 2))
actual_design[:, 1] = convolved
data_2d = np.reshape(data, (-1, data.shape[-1]))
actual_B = npl.pinv(actual_design).dot(data_2d.T)
actual_B_4d = np.reshape(actual_B.T, data.shape[:-1] + (-1,))
    exp_design, exp_B_4d = glm(data, convolved)
assert_almost_equal(actual_B_4d, exp_B_4d)
def test_scale_design_mtx():
actual_design = np.ones((len(convolved), 2))
actual_design[:, 1] = convolved
    exp_design, exp_B_4d = glm(data, convolved)
f1=scale_design_mtx(exp_design)[-1]
r1=np.array([ 1. , 0.16938989])
assert_almost_equal(f1,r1) | bsd-3-clause |
LohithBlaze/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray.
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
krasch/smart-assistants | examples/histogram_classifiers.py | 1 | 1499 | # -*- coding: UTF-8 -*-
"""
Create a histogram that compares true positives for different classifiers/classifier settings. This allows
checking how well the classifiers succeed in predicting each of the actions.
"""
import sys
sys.path.append("..")
import pandas
from recsys.classifiers.temporal import TemporalEvidencesClassifier
from recsys.classifiers.bayes import NaiveBayesClassifier
from recsys.dataset import load_dataset
from evaluation import plot
from evaluation.metrics import QualityMetricsCalculator
import config
#configuration
data = load_dataset("../datasets/houseA.csv", "../datasets/houseA.config")
classifiers = [NaiveBayesClassifier(data.features, data.target_names),
TemporalEvidencesClassifier(data.features, data.target_names)]
#run the experiment using full dataset as training and as test data
results = []
for cls in classifiers:
cls = cls.fit(data.data, data.target)
r = cls.predict(data.data)
r = QualityMetricsCalculator(data.target, r)
results.append(r.true_positives_for_all())
# keep only the measurements for cutoff=1 from each classifier's results
results = [r.loc[1] for r in results]
results = pandas.concat(results, axis=1)
results.columns = [cls.name for cls in classifiers]
plot_conf = plot.plot_config(config.plot_directory, sub_dirs=[data.name], prefix="histogram_classifiers", img_type=config.img_type)
plot.comparison_histogram(results, plot_conf)
print "Results can be found in the \"%s\" directory" % config.plot_directory
| mit |
aalto-trafficsense/regular-routes-server | pyfiles/prediction/pred_utils.py | 1 | 3224 | from numpy import *
from .FF import FF
def cdistance2metres(p1,p2):
from geopy.distance import vincenty
return vincenty(p1, p2).meters
def do_cluster(X, N_clusters=10):
"""
CLUSTERING: Create N_clusters clusters from the data
----------------------------------------
"""
from sklearn.cluster import KMeans
    print("Clustering", len(X), "points into", N_clusters, "personal nodes")
h = KMeans(N_clusters, max_iter=100, n_init=1)
h.fit(X[:,0:2])
labels = h.labels_
nodes = h.cluster_centers_
return nodes
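# A hypothetical usage sketch (coordinates and cluster count invented for
# illustration): pick a handful of personal waypoints from a lon/lat trace.
#
#   trace = array([[24.94, 60.17], [24.95, 60.17], [24.83, 60.19], [24.82, 60.18]])
#   nodes = do_cluster(trace, N_clusters=2)   # -> array of shape (2, 2)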
def snap(run,stops):
'''
SNAP
=======
snap points in 'run' to points in 'stops', return the indices.
'''
T = run.shape[0]
y = zeros(T,dtype=int)
for t in range(T):
#print t, "/", T
p = run[t,:]
dvec = sqrt((stops[:,0]-p[0])**2 + (stops[:,1]-p[1])**2)
i = argmin(dvec)
y[t] = i
return y
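# A hypothetical usage sketch (points invented for illustration): each trace
# point is snapped to the index of its nearest stop.
#
#   stops = array([[0., 0.], [10., 10.]])
#   run = array([[1., 1.], [9., 9.]])
#   snap(run, stops)                          # -> array([0, 1])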
def do_snapping(X, nodes):
"""
SNAPPING: snap all lon/lat points in X to a cluster, return as Y.
-----------------------------------------------------------------
    NOTE: an alternative would be to create and use an extra column in X (instead of a separate Y).
"""
print("Snapping trace to these ", len(nodes)," waypoints")
print(X.shape,nodes.shape)
Y = snap(X[:,0:2],nodes).astype(int)
return Y
def do_movement_filtering(X,min_metres,b=10):
"""
FILTERING: Filter out boring examples
--------------------------------------
We don't want to waste time and computational resources training on stationary segments.
(Also for a demo we don't want to watch an animation where nothing is animated).
"""
print("Filter out stationary segments ...", end=' ')
T,D = X.shape
X_ = zeros(X.shape)
X_[0:b,:] = X[0:b,:]
i = b
breaks = [0]
for t in range(b,T):
xx = X[t-b:t,0:2]
        # 1. take the bounding box of the position columns over the window
        # 2. convert its extent to metres (note: p1 and p2 differ only in the
        #    first coordinate, so d measures the spread along that axis only)
        p1 = array([min(xx[:,0]), max(xx[:,1])])
        p2 = array([max(xx[:,0]), max(xx[:,1])])
# 3. calc distance
d = cdistance2metres(p1,p2)
# 4. threshold
if d > min_metres:
#print i,"<-",t
X_[i,:] = X[t,:]
i = i + 1
else:
breaks = breaks + [t]
    X = X_[0:i,:]
    # report the original number of examples (T) before it is overwritten below
    print("... We filtered down from", T, "examples to", i, "consisting of around ", (i*100./(len(breaks)+i)), "% travelling.")
    T,D = X.shape
return X
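# A hypothetical usage sketch (threshold invented for illustration): drop the
# samples whose preceding b-point window spans less than min_metres.
#
#   X_moving = do_movement_filtering(X, min_metres=20, b=10)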
def do_feature_filtering(X):
"""
Turn raw data `X` into more advanced features, as `Z`.
------------------------------------------------------
See `FF.py` on how this works.
Note: most of the predictive power comes from good features!!!
"""
    #print "Pass through ESN filter"
T,D = X.shape
#from sklearn.kernel_approximation import RBFSampler
#rbf = RBFSampler(gamma=1, random_state=1)
#H=D*2+1 #20
rtf = FF(D)
H = rtf.N_h
#X = cc.coord_center(X) # CENTER DATA
Z = zeros((T,H))
for t in range(0,T):
#print X[t,0:2], Y[t+1]
Z[t] = rtf.phi(X[t])
#print "... turned ", X.shape, "into", Z.shape
return Z
| mit |
jseabold/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 159 | 10196 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
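# dist_func above is the user-defined callable metric exercised by
# test_ball_tree_pickle below; with p=2 it reduces to the Euclidean distance.
# A quick sanity check (values invented for illustration):
#
#   dist_func(np.array([0., 0.]), np.array([3., 4.]), p=2)   # -> 5.0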
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
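# compute_kernel_slow is the brute-force reference for the KDE tests below: for
# every query point it sums kernel_norm(h, d, kernel) * K(dist / h) over all
# training points, and BallTree.kernel_density must reproduce that result
# within the requested atol/rtol.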
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
kjung/scikit-learn | sklearn/manifold/locally_linear.py | 37 | 25852 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
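# A minimal sketch of what barycenter_weights returns (toy values invented for
# illustration): the point [1, 0] sits halfway between its two neighbours, so
# the weights are (approximately) equal and sum to 1.
#
#   X = np.array([[1., 0.]])                  # one sample in 2-D
#   Z = np.array([[[0., 0.], [2., 0.]]])      # its two neighbours
#   barycenter_weights(X, Z)                  # -> approx. array([[0.5, 0.5]])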
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=1):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1, n_jobs=n_jobs).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
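# A minimal usage sketch (random toy data): the result is a sparse
# (n_samples, n_samples) matrix whose i-th row holds the barycenter weights of
# X[i] with respect to its n_neighbors nearest neighbours, so each row sums to 1.
#
#   X = np.random.RandomState(0).rand(10, 3)
#   W = barycenter_kneighbors_graph(X, n_neighbors=3)
#   W.shape                                   # -> (10, 10)
#   np.allclose(W.sum(axis=1), 1)             # -> True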
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
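# A minimal sketch (toy diagonal matrix invented for illustration): with
# eigen_solver='dense', the k eigenvectors belonging to the smallest
# eigenvalues after skipping the k_skip smallest one(s) are returned, together
# with the sum of those eigenvalues.
#
#   M = np.diag([0., 1., 2., 3.])
#   vecs, err = null_space(M, k=2, k_skip=1, eigen_solver='dense')
#   vecs.shape                                # -> (4, 2)
#   err                                       # -> 3.0  (= 1.0 + 2.0)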
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None, n_jobs=1):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I so that M = (W - I)' (W - I)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float64)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
# build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1] *
U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
# find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
# choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
# find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
# calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
# find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
# Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float64)
for i in range(N):
s_i = s_range[i]
# select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
# compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
# Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
# Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
# We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) +
(1 - alpha_i) * w_reg[i, :, None])
# Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
# We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
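# A minimal usage sketch of the functional interface (toy data; assumes
# sklearn.datasets.make_swiss_roll is available):
#
#   from sklearn.datasets import make_swiss_roll
#   X, _ = make_swiss_roll(n_samples=200, random_state=0)
#   Y, err = locally_linear_embedding(X, n_neighbors=10, n_components=2)
#   Y.shape                                   # -> (200, 2)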
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
    embedding_ : array-like, shape [n_samples, n_components]
        Stores the embedding vectors
    reconstruction_error_ : float
        Reconstruction error associated with `embedding_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg, n_jobs=self.n_jobs)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
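# A minimal usage sketch of the estimator interface, mirroring the functional
# example above (toy data only):
#
#   from sklearn.datasets import make_swiss_roll
#   X, _ = make_swiss_roll(n_samples=200, random_state=0)
#   lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2)
#   X_new = lle.fit_transform(X)              # -> shape (200, 2)
#   lle.reconstruction_error_                 # float, see class docstring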
| bsd-3-clause |
anderspitman/scikit-bio | skbio/diversity/alpha/tests/test_faith_pd.py | 2 | 8645 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
from io import StringIO
import os
import numpy as np
import pandas as pd
from skbio import TreeNode
from skbio.util import get_data_path
from skbio.tree import DuplicateNodeError, MissingNodeError
from skbio.diversity.alpha import faith_pd
class FaithPDTests(TestCase):
def setUp(self):
self.counts = np.array([0, 1, 1, 4, 2, 5, 2, 4, 1, 2])
self.b1 = np.array([[1, 3, 0, 1, 0],
[0, 2, 0, 4, 4],
[0, 0, 6, 2, 1],
[0, 0, 1, 1, 1]])
self.sids1 = list('ABCD')
self.oids1 = ['OTU%d' % i for i in range(1, 6)]
self.t1 = TreeNode.read(StringIO(
'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
'0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
self.t1_w_extra_tips = TreeNode.read(
StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
')root;'))
def test_faith_pd_none_observed(self):
actual = faith_pd(np.array([], dtype=int),
np.array([], dtype=int),
self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
actual = faith_pd([0, 0, 0, 0, 0], self.oids1, self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
def test_faith_pd_all_observed(self):
actual = faith_pd([1, 1, 1, 1, 1], self.oids1, self.t1)
expected = sum(n.length for n in self.t1.traverse()
if n.length is not None)
self.assertAlmostEqual(actual, expected)
actual = faith_pd([1, 2, 3, 4, 5], self.oids1, self.t1)
expected = sum(n.length for n in self.t1.traverse()
if n.length is not None)
self.assertAlmostEqual(actual, expected)
def test_faith_pd(self):
# expected results derived from QIIME 1.9.1, which
        # is a completely different implementation from skbio's initial
# phylogenetic diversity implementation
actual = faith_pd(self.b1[0], self.oids1, self.t1)
expected = 4.5
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[1], self.oids1, self.t1)
expected = 4.75
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[2], self.oids1, self.t1)
expected = 4.75
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[3], self.oids1, self.t1)
expected = 4.75
self.assertAlmostEqual(actual, expected)
def test_faith_pd_extra_tips(self):
# results are the same despite presences of unobserved tips in tree
actual = faith_pd(self.b1[0], self.oids1, self.t1_w_extra_tips)
expected = faith_pd(self.b1[0], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[1], self.oids1, self.t1_w_extra_tips)
expected = faith_pd(self.b1[1], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[2], self.oids1, self.t1_w_extra_tips)
expected = faith_pd(self.b1[2], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[3], self.oids1, self.t1_w_extra_tips)
expected = faith_pd(self.b1[3], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
def test_faith_pd_minimal(self):
# two tips
tree = TreeNode.read(StringIO('(OTU1:0.25, OTU2:0.25)root;'))
actual = faith_pd([1, 0], ['OTU1', 'OTU2'], tree)
expected = 0.25
self.assertEqual(actual, expected)
def test_faith_pd_qiime_tiny_test(self):
# the following table and tree are derived from the QIIME 1.9.1
# "tiny-test" data
tt_table_fp = get_data_path(
os.path.join('qiime-191-tt', 'otu-table.tsv'), 'data')
tt_tree_fp = get_data_path(
os.path.join('qiime-191-tt', 'tree.nwk'), 'data')
self.q_table = pd.read_csv(tt_table_fp, sep='\t', skiprows=1,
index_col=0)
self.q_tree = TreeNode.read(tt_tree_fp)
expected_fp = get_data_path(
os.path.join('qiime-191-tt', 'faith-pd.txt'), 'data')
expected = pd.read_csv(expected_fp, sep='\t', index_col=0)
for sid in self.q_table.columns:
actual = faith_pd(self.q_table[sid],
otu_ids=self.q_table.index,
tree=self.q_tree)
self.assertAlmostEqual(actual, expected['PD_whole_tree'][sid])
def test_faith_pd_root_not_observed(self):
# expected values computed by hand
tree = TreeNode.read(
StringIO('((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
'root;'))
otu_ids = ['OTU%d' % i for i in range(1, 5)]
# root node not observed, but branch between (OTU1, OTU2) and root
# is considered observed
actual = faith_pd([1, 1, 0, 0], otu_ids, tree)
expected = 0.6
self.assertAlmostEqual(actual, expected)
# root node not observed, but branch between (OTU3, OTU4) and root
# is considered observed
actual = faith_pd([0, 0, 1, 1], otu_ids, tree)
expected = 2.3
self.assertAlmostEqual(actual, expected)
def test_faith_pd_invalid_input(self):
# tree has duplicated tip ids
t = TreeNode.read(
StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU2:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(DuplicateNodeError, faith_pd, counts, otu_ids,
t)
# unrooted tree as input
t = TreeNode.read(StringIO('((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
'OTU4:0.7);'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# otu_ids has duplicated ids
t = TreeNode.read(
StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU2']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# len of vectors not equal
t = TreeNode.read(
StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# negative counts
t = TreeNode.read(
StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, -3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# tree with no branch lengths
t = TreeNode.read(
StringIO('((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# tree missing some branch lengths
t = TreeNode.read(
StringIO('(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# otu_ids not present in tree
t = TreeNode.read(
StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU42']
self.assertRaises(MissingNodeError, faith_pd, counts, otu_ids, t)
if __name__ == "__main__":
main()
| bsd-3-clause |
pybrain/pybrain | docs/tutorials/rl.py | 2 | 6741 | ############################################################################
# PyBrain Tutorial "Reinforcement Learning"
#
# Author: Thomas Rueckstiess, [email protected]
############################################################################
__author__ = 'Thomas Rueckstiess, [email protected]'
"""
A reinforcement learning (RL) task in pybrain always consists of a few
components that interact with each other: Environment, Agent, Task, and
Experiment. In this tutorial we will go through each of them, create
the instances and explain what they do.
But first of all, we need to import some general packages and the RL
components from PyBrain:
"""
from scipy import * #@UnusedWildImport
import matplotlib.pyplot as plt
from pybrain.rl.environments.mazes import Maze, MDPMazeTask
from pybrain.rl.learners.valuebased import ActionValueTable
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners import Q, SARSA #@UnusedImport
from pybrain.rl.experiments import Experiment
"""
For later visualization purposes, we also need to initialize the
plotting engine.
"""
plt.gray()
plt.ion()
"""
The Environment is the world in which the agent acts. It receives input
with the .performAction() method and returns an output with
.getSensors(). All environments in PyBrain are located under
pybrain/rl/environments.
One of these environments is the maze environment, which we will use for
this tutorial. It creates a labyrinth with free fields, walls, and a
goal point. An agent can move over the free fields and needs to find the
goal point. Let's define the maze structure, a simple 2D numpy array, where
1 is a wall and 0 is a free field:
"""
structure = array([[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 1, 0, 0, 0, 0, 1],
[1, 0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 0, 1, 0, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1]])
"""
Then we create the environment with the structure as first parameter
and the goal field tuple as second parameter:
"""
environment = Maze(structure, (7, 7))
"""
Next, we need an agent. The agent is where the learning happens. It can
interact with the environment with its .getAction() and
.integrateObservation() methods.
The agent itself consists of a controller, which maps states to actions,
a learner, which updates the controller parameters according to the
interaction it had with the world, and an explorer, which adds some
explorative behaviour to the actions. All standard agents already have a
default explorer, so we don't need to take care of that in this
tutorial.
The controller in PyBrain is a module that takes states as inputs and
transforms them into actions. For value-based methods, like the
Q-Learning algorithm we will use here, we need a module that implements
the ActionValueInterface. There are currently two modules in PyBrain
that do this: The ActionValueTable for discrete actions and the
ActionValueNetwork for continuous actions. Our maze uses discrete
actions, so we need a table:
"""
controller = ActionValueTable(81, 4)
controller.initialize(1.)
"""
The table needs the number of states and actions as parameters. The standard
maze environment comes with the following 4 actions: north, east, south, west.
Then, we initialize the table with 1 everywhere. This is not always necessary
but will help converge faster, because unvisited state-action pairs have a
promising positive value and will be preferred over visited ones that didn't
lead to the goal.
Each agent also has a learner component. Several classes of RL learners
are currently implemented in PyBrain: black box optimizers, direct
search methods, and value-based learners. The classical Reinforcement
Learning mostly consists of value-based learning, in which of the most
well-known algorithms is the Q-Learning algorithm. Let's now create
the agent and give it the controller and learner as parameters.
"""
learner = Q()
agent = LearningAgent(controller, learner)
"""
So far, there is no connection between the agent and the environment. In fact,
in PyBrain, there is a special component that connects environment and agent: the
task. A task also specifies what the goal is in an environment and how the
agent is rewarded for its actions. For episodic experiments, the Task also
decides when an episode is over. Environments usually bring along their own
tasks. The Maze environment for example has an MDPMazeTask, which we will use.
MDP stands for "Markov decision process" and means here that the agent knows
its exact location in the maze. The task receives the environment as a parameter.
"""
task = MDPMazeTask(environment)
"""
Finally, in order to learn something, we create an experiment, tell it both
task and agent (it knows the environment through the task) and let it run
for some number of steps or infinitely, like here:
"""
experiment = Experiment(task, agent)
while True:
experiment.doInteractions(100)
agent.learn()
agent.reset()
plt.pcolor(controller.params.reshape(81,4).max(1).reshape(9,9))
plt.show()
plt.pause(0.1)
"""
Above, the experiment executes 100 interactions between agent and
environment, or, to be exact, between the agent and the task. The task
will process the agent's actions, possibly scale them and hand them over to
the environment. The environment responds and returns the new state back to
the task, which decides what information should be given to the agent.
The task also gives a reward value for each step to the agent.
After 100 steps, we call the agent's .learn() method and then reset it.
This will make the agent forget the previously executed steps but of
course it won't undo the changes it learned.
Then the loop is repeated, until a desired behaviour is learned.
In order to observe the learning progress, we visualize the controller
with the plotting lines at the end of the loop. The ActionValueTable consists
of a scalar value for each state/action pair, in this case 81x4 values.
A nice way to visualize learning is to only consider the maximum value
over all actions for each state. This value is called the state-value V
and is defined as V(s) = max_a Q(s, a).
We plot the new table after learning and resetting the agent, INSIDE the
while loop. Running this code, you should see the shape of the maze and
a change of colors for the free fields. During learning, colors may jump
and change back and forth, but eventually the learning should converge
to the true state values, having higher scores (brigher fields) the
closer they are to the goal.
"""
| bsd-3-clause |
w2naf/davitpy | gme/isr/mho.py | 4 | 8556 | # Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
"""
*********************
**Module**: gme.mho
*********************
This module handles Millstone Hill ISR data
**Class**:
* :class:`mhoData`: Read Millstone Hill data, either locally if it can be found, or directly from Madrigal
"""
# constants
user_fullname = 'Sebastien de Larquier'
user_email = '[email protected]'
user_affiliation = 'Virginia Tech'
#####################################################
#####################################################
class mhoData(object):
"""Read Millstone Hill data, either locally if it can be found, or directly from Madrigal
**Args**:
* **expDate** (datetime.datetime): experiment date
* **[endDate]** (datetime.datetime): end date/time to look for experiment files on Madrigal
* **[getMad]** (bool): force download from Madrigal (overwrite any matching local file)
* **[dataPath]** (str): path where the local data should be read/saved
* **[fileExt]** (str): file extension (i.e., 'g.002'). If None is provided, it will just look for the most recent available one
* **user_fullname** (str): required to download data from Madrigal (no registration needed)
* **user_email** (str): required to download data from Madrigal (no registration needed)
* **user_affiliation** (str): required to download data from Madrigal (no registration needed)
**Example**:
::
# Get data for November 17-18, 2010
import datetime as dt
user_fullname = 'Sebastien de Larquier'
user_email = '[email protected]'
user_affiliation = 'Virginia Tech'
date = dt.datetime(2010,11,17,20)
edate = dt.datetime(2010,11,18,13)
data = mhoData( date, endDate=edate,
user_fullname=user_fullname,
user_email=user_email,
user_affiliation=user_affiliation )
written by Sebastien de Larquier, 2013-03
"""
def __init__(self, expDate, endDate=None,
dataPath=None, fileExt=None, getMad=False,
user_fullname=None, user_email=None, user_affiliation=None):
self.expDate = expDate
self.endDate = endDate
self.dataPath = dataPath
self.fileExt = fileExt
self.mhoCipher = {'nel': r'N$_e$ [$\log$(m$^{-3}$)]',
'ti': r'T$_i$ [K]',
'te': r'T$_e$ [K]',
'vo': r'v$_i$ [m/s]'}
# Look for the file locally
if not getMad:
filePath = self.getFileLocal()
# If no local files, get it from madrigal
if getMad or not filePath:
if None in [user_fullname, user_email, user_affiliation]:
print 'Error: Please provide user_fullname, user_email, user_affiliation.'
return
filePath = self.getFileMad(user_fullname, user_email, user_affiliation)
print filePath
if filePath:
self.readData(filePath)
def look(self):
"""Returns radar pointing directions during selected experiment
"""
import numpy as np
pointing = [(el, az) for el, az in zip(np.around(self.elev.flatten(),1), np.around(self.azim.flatten(),1))]
return np.unique(pointing)
def readData(self, filePath):
"""Read data from HDF5 file
**Args**:
* **filePath** (str): Path and name of HDF5 file
"""
import h5py as h5
import matplotlib as mp
import numpy as np
from utils import geoPack as gp
from utils import Re
import datetime as dt
with h5.File(filePath,'r') as f:
data = f['Data']['Array Layout']
data2D = data['2D Parameters']
data1D = data['1D Parameters']
params = f['Metadata']['Experiment Parameters']
self.nel = data2D['nel'][:].T
self.ne = data2D['ne'][:].T
self.ti = data2D['ti'][:].T
self.vo = data2D['vo'][:].T
self.te = data2D['tr'][:].T * self.ti
self.range = data['range'][:]
self.time = np.array( [ dt.datetime.utcfromtimestamp(tt) for tt in data['timestamps'][:].T ] )
self.elev = np.array( [data1D['el1'][:],
data1D['el2'][:]] ).T
self.azim = np.array( [data1D['az1'][:],
data1D['az2'][:]] ).T
try:
self.scntyp = data1D['scntyp'][:]
except:
self.scntyp = np.zeros(self.time.shape)
vinds = np.where( self.elev[:,0] >= 88. )
if len(vinds[0]) > 0:
self.scntyp[vinds] = 5
self.gdalt = data2D['gdalt'][:].T
self.position = [float(params[7][1]),
float(params[8][1]),
float(params[9][1])]
self.lat = data2D['glat'][:].T
self.lon = data2D['glon'][:].T
def getFileLocal(self):
"""Look for the file in the dataPath or current directory
**Belongs to**: :class:`mhoData`
**Returns**:
* **filePath**: the path and name of the data file
"""
import os, glob, datetime
import numpy as np
fileName = 'mlh{:%y%m%d}'.format( self.expDate )
if not self.fileExt:
dfiles = np.sort(glob.glob(self.dataPath+fileName+'?.???.hdf5'))
if not not list(dfiles):
self.fileExt = dfiles[-1][-10:-5]
else: return
fileName = fileName + self.fileExt + '.hdf5'
filePath = os.path.join(self.dataPath, fileName)
return filePath
def getFileMad(self, user_fullname, user_email, user_affiliation):
"""Look for the data on Madrigal
**Belongs to**: :class:`mhoData`
**Returns**:
* **filePath**: the path and name of the data file
"""
import madrigalWeb.madrigalWeb
import os, h5py, numpy, datetime
from matplotlib.dates import date2num, epoch2num, num2date
madrigalUrl = 'http://cedar.openmadrigal.org'
madData = madrigalWeb.madrigalWeb.MadrigalData(madrigalUrl)
# Start and end date/time
sdate = self.expDate
fdate = self.endDate if self.endDate else sdate + datetime.timedelta(days=1)
# Get experiment list
expList = madData.getExperiments(30,
sdate.year, sdate.month, sdate.day, sdate.hour,
sdate.minute, sdate.second,
fdate.year, fdate.month, fdate.day, fdate.hour,
fdate.minute, fdate.second)
if not expList: return
# Try to get the default file
thisFilename = False
fileList = madData.getExperimentFiles(expList[0].id)
for thisFile in fileList:
if thisFile.category == 1:
thisFilename = thisFile.name
break
if not thisFilename: return
# Download HDF5 file
result = madData.downloadFile(thisFilename,
os.path.join( self.dataPath,"{}.hdf5"\
.format(os.path.split(thisFilename)[1]) ),
user_fullname, user_email, user_affiliation,
format="hdf5")
# Now add some derived data to the hdf5 file
res = madData.isprint(thisFilename,
'YEAR,MONTH,DAY,HOUR,MIN,SEC,RANGE,GDALT,NE,NEL,MDTYP,GDLAT,GLON',
'', user_fullname, user_email, user_affiliation)
rows = res.split("\n")
filePath = os.path.join( self.dataPath,
os.path.split(thisFilename)[1]+'.hdf5' )
self.fileExt = ( os.path.split(thisFilename)[1] )[-1]
# Add new datasets to hdf5 file
with h5py.File(filePath,'r+') as f:
ftime = epoch2num( f['Data']['Array Layout']['timestamps'] )
frange = f['Data']['Array Layout']['range']
tDim, rDim = ftime.shape[0], frange.shape[0]
shape2d = (tDim, rDim)
gdalt = numpy.empty(shape2d)
gdalt[:] = numpy.nan
gdlat = numpy.empty(shape2d)
gdlat[:] = numpy.nan
gdlon = numpy.empty(shape2d)
gdlon[:] = numpy.nan
ne = numpy.empty(shape2d)
ne[:] = numpy.nan
nel = numpy.empty(shape2d)
nel[:] = numpy.nan
dtfmt = '%Y-%m-%d %H:%M:%S'
dttype = numpy.dtype('a{}'.format(len(dtfmt)+2))
dtime = numpy.empty(tDim, dtype=dttype)
# Iterate through the downloaded data
for r in rows:
dat = r.split()
if not dat: continue
# Figure out your range/time index
dt = datetime.datetime( int(dat[0]), int(dat[1]), int(dat[2]),
int(dat[3]), int(dat[4]), int(dat[5]) )
tind = numpy.where(ftime[:] <= date2num(dt))[0]
rind = numpy.where(frange[:] <= float(dat[6]))[0]
if not list(tind) or not list(rind): continue
if dat[7] != 'missing':
gdalt[tind[-1],rind[-1]] = float(dat[7])
if dat[8] != 'missing':
ne[tind[-1],rind[-1]] = float(dat[8])
if dat[9] != 'missing':
nel[tind[-1],rind[-1]] = float(dat[9])
dtime[tind[-1]] = dt.strftime(dtfmt)
if dat[11] != 'missing':
gdlat[tind[-1],rind[-1]] = float(dat[11])
if dat[12] != 'missing':
gdlon[tind[-1],rind[-1]] = float(dat[12])
# Add 2D datasets
parent = f['Data']['Array Layout']['2D Parameters']
gdalt_ds = parent.create_dataset('gdalt', data=gdalt)
gdalt_ds = parent.create_dataset('glat', data=gdlat)
gdalt_ds = parent.create_dataset('glon', data=gdlon)
ne_ds = parent.create_dataset('ne', data=ne)
nel_ds = parent.create_dataset('nel', data=nel)
# Add 1D datasets
parent = f['Data']['Array Layout']
datetime_ds = parent.create_dataset('datetime', data=dtime)
return filePath
| gpl-3.0 |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/mpl_toolkits/tests/__init__.py | 5 | 2335 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import difflib
import os
from matplotlib import rcParams, rcdefaults, use
_multiprocess_can_split_ = True
# Check that the test directories exist
if not os.path.exists(os.path.join(
os.path.dirname(__file__), 'baseline_images')):
raise IOError(
'The baseline image directory does not exist. '
'This is most likely because the test data is not installed. '
'You may need to install matplotlib from source to get the '
'test data.')
def setup():
# The baseline images are created in this locale, so we should use
# it during all of the tests.
import locale
import warnings
from matplotlib.backends import backend_agg, backend_pdf, backend_svg
try:
locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
except locale.Error:
try:
locale.setlocale(locale.LC_ALL, str('English_United States.1252'))
except locale.Error:
warnings.warn(
"Could not set locale to English/United States. "
"Some date-related tests may fail")
use('Agg', warn=False) # use Agg backend for these tests
# These settings *must* be hardcoded for running the comparison
# tests and are not necessarily the default values as specified in
# rcsetup.py
rcdefaults() # Start with all defaults
rcParams['font.family'] = 'Bitstream Vera Sans'
rcParams['text.hinting'] = False
rcParams['text.hinting_factor'] = 8
def assert_str_equal(reference_str, test_str,
format_str=('String {str1} and {str2} do not '
'match:\n{differences}')):
"""
Assert the two strings are equal. If not, fail and print their
diffs using difflib.
"""
if reference_str != test_str:
diff = difflib.unified_diff(reference_str.splitlines(1),
test_str.splitlines(1),
'Reference', 'Test result',
'', '', 0)
raise ValueError(format_str.format(str1=reference_str,
str2=test_str,
differences=''.join(diff)))
| gpl-3.0 |
bthirion/scikit-learn | examples/exercises/plot_iris_exercise.py | 31 | 1622 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],
linestyles=['--', '-', '--'], levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
hsiaoyi0504/scikit-learn | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
| bsd-3-clause |
gfyoung/pandas | pandas/tests/series/test_unary.py | 3 | 1755 | import pytest
from pandas import Series
import pandas._testing as tm
class TestSeriesUnaryOps:
# __neg__, __pos__, __inv__
def test_neg(self):
ser = tm.makeStringSeries()
ser.name = "series"
tm.assert_series_equal(-ser, -1 * ser)
def test_invert(self):
ser = tm.makeStringSeries()
ser.name = "series"
tm.assert_series_equal(-(ser < 0), ~(ser < 0))
@pytest.mark.parametrize(
"source, target",
[
([1, 2, 3], [-1, -2, -3]),
([1, 2, None], [-1, -2, None]),
([-1, 0, 1], [1, 0, -1]),
],
)
def test_unary_minus_nullable_int(
self, any_signed_nullable_int_dtype, source, target
):
dtype = any_signed_nullable_int_dtype
ser = Series(source, dtype=dtype)
result = -ser
expected = Series(target, dtype=dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("source", [[1, 2, 3], [1, 2, None], [-1, 0, 1]])
def test_unary_plus_nullable_int(self, any_signed_nullable_int_dtype, source):
dtype = any_signed_nullable_int_dtype
expected = Series(source, dtype=dtype)
result = +expected
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"source, target",
[
([1, 2, 3], [1, 2, 3]),
([1, -2, None], [1, 2, None]),
([-1, 0, 1], [1, 0, 1]),
],
)
def test_abs_nullable_int(self, any_signed_nullable_int_dtype, source, target):
dtype = any_signed_nullable_int_dtype
ser = Series(source, dtype=dtype)
result = abs(ser)
expected = Series(target, dtype=dtype)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
nesterione/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
tetherless-world/setlr | setlr/__init__.py | 1 | 28898 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from builtins import str
from builtins import next
from builtins import object
from rdflib import *
from rdflib.util import guess_format
import rdflib
import csv
import json
import sys, collections
import requests
import pandas
import re
import os
from six import text_type as str
from jinja2 import Template
from toposort import toposort_flatten
from numpy import isnan
import uuid
import tempfile
import ijson
from . import iterparse_filter
#import xml.etree.ElementTree as ET
import xml.etree.ElementTree
from itertools import chain
import zipfile
import gzip
import logging
import hashlib
from slugify import slugify
def hash(value):
m = hashlib.sha256()
m.update(value.encode('utf-8'))
return m.hexdigest()
csvw = Namespace('http://www.w3.org/ns/csvw#')
ov = Namespace('http://open.vocab.org/terms/')
setl = Namespace('http://purl.org/twc/vocab/setl/')
prov = Namespace('http://www.w3.org/ns/prov#')
pv = Namespace('http://purl.org/net/provenance/ns#')
sp = Namespace('http://spinrdf.org/sp#')
sd = Namespace('http://www.w3.org/ns/sparql-service-description#')
dc = Namespace('http://purl.org/dc/terms/')
void = Namespace('http://rdfs.org/ns/void#')
api_vocab = Namespace('http://purl.org/linked-data/api/vocab#')
sys.setrecursionlimit(10000)
from requests_testadapter import Resp
def camelcase(s):
return slugify(s).title().replace("-","")
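# Illustrative example: camelcase('scene label') is expected to give 'SceneLabel',
# assuming slugify('scene label') returns 'scene-label'.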
class LocalFileAdapter(requests.adapters.HTTPAdapter):
def build_response_from_file(self, request):
file_path = request.url[7:]
with open(file_path, 'rb') as file:
buff = bytearray(os.path.getsize(file_path))
file.readinto(buff)
resp = Resp(buff)
r = self.build_response(request, resp)
return r
def send(self, request, stream=False, timeout=None,
verify=True, cert=None, proxies=None):
return self.build_response_from_file(request)
requests_session = requests.session()
requests_session.mount('file://', LocalFileAdapter())
requests_session.mount('file:///', LocalFileAdapter())
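# Illustrative example: with the adapter mounted above, a local file can be read
# through the same session API as a remote URL, e.g.
#   requests_session.get('file:///tmp/example.csv')   # hypothetical path
# LocalFileAdapter strips the 'file://' prefix and serves the bytes from disk.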
datatypeConverters = collections.defaultdict(lambda: str)
datatypeConverters.update({
XSD.string: str,
XSD.decimal: float,
XSD.integer: int,
XSD.float: float,
XSD.double: float
})
run_samples = False
_rdf_formats_to_guess = [
'xml',
'json-ld',
'trig',
'nquads',
'trix'
]
def read_csv(location, result):
args = dict(
sep = result.value(csvw.delimiter, default=Literal(",")).value,
#header = result.value(csvw.headerRow, default=Literal(0)).value),
skiprows = result.value(csvw.skipRows, default=Literal(0)).value,
dtype=str,
# dtype = object # Does not seem to play well with future and python2/3 conversion
)
if result.value(csvw.header):
args['header'] = [0]
with get_content(location, result) as fo:
df = pandas.read_csv(fo, encoding='utf-8', **args)
logger.debug("Loaded %s", location)
return df
def read_graph(location, result, g = None):
if g is None:
g = ConjunctiveGraph()
graph = ConjunctiveGraph(store=g.store, identifier=result.identifier)
if len(graph) == 0:
data = get_content(location, result).read()
f = guess_format(location)
for fmt in [f] + _rdf_formats_to_guess:
try:
graph.parse(data=data, format=fmt)
break
except Exception as e:
#print e
pass
if len(graph) == 0:
logger.error("Could not parse graph: %s", location)
if result[RDF.type:OWL.Ontology]:
for ontology in graph.subjects(RDF.type, OWL.Ontology):
imports = [graph.resource(x) for x in graph.objects(ontology, OWL.imports)]
for i in imports:
read_graph(i.identifier, i, g = g)
return g
class FileLikeFromIter(object):
_closed = False
def __init__(self, content_iter):
self.iter = content_iter
self.data = b''
def __iter__(self):
return self.iter
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return False
def closed(self):
if self._closed:
return True
if len(self.data) > 0:
return False
try:
self.data = next(self.iter)
except StopIteration:
            self._closed = True
return True
return False
# Enter and Exit are needed to allow this to work with with
def __enter__(self):
return self
# Could be improved for better error/exception handling
def __exit__(self, err_type, value, tracebock):
pass
def read(self, n=None):
if n is None:
return self.data + b''.join(l for l in self.iter)
else:
while len(self.data) < n:
try:
self.data = b''.join((self.data, next(self.iter)))
except StopIteration:
break
result, self.data = self.data[:n], self.data[n:]
return result
def _open_local_file(location):
if location.startswith("file://"):
if os.name == 'nt': # skip the initial
return open(location.replace('file:///','').replace('file://',''),'rb')
else:
return open(location.replace('file://',''),'rb')
content_handlers = [
_open_local_file,
lambda location: FileLikeFromIter(requests.get(location,stream=True).iter_content(1024*1024))
]
def get_content(location, result):
response = None
for handler in content_handlers:
response = handler(location)
if response is not None:
break
if result[RDF.type:setl.Tempfile]:
        # spool the stream to a temporary file before any unpacking
        response = to_tempfile(response)
for t in result[RDF.type]:
# Do we know how to unpack this?
if t.identifier in unpackers:
response = unpackers[t.identifier](response)
return response
def to_tempfile(f):
tf = tempfile.TemporaryFile()
logger.debug("Writing %s to disk.", f)
for chunk in f:
if chunk: # filter out keep-alive new chunks
tf.write(chunk)
tf.seek(0)
logger.debug("Finished writing %s to disk.", f)
return tf
def unpack_zipfile(f):
zf = zipfile.ZipFile(f, mode='r')
files = zf.infolist()
return zf.open(files[0])
unpackers = {
# setl.Tempfile : lambda x: x,
setl.ZipFile : lambda x: unpack_zipfile(to_tempfile(x)),
setl.GZipFile : lambda f: gzip.GzipFile(fileobj=f,mode='r')
}
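# Dispatch note: get_content() applies these unpackers by RDF type, so a resource
# typed setl:ZipFile is first spooled to a temporary file and its first archive
# member is returned as the readable stream, while setl:GZipFile wraps the stream
# directly.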
packers = {
# setl.Tempfile : lambda x: x,
setl.GZipFile : lambda f: gzip.GzipFile(fileobj=f,mode='wb')
}
def read_excel(location, result):
args = dict(
sheet_name = result.value(setl.sheetname, default=Literal(0)).value,
header = [int(x) for x in result.value(csvw.headerRow, default=Literal('0')).value.split(',')],
skiprows = result.value(csvw.skipRows, default=Literal(0)).value
)
if result.value(csvw.header):
args['header'] = [result.value(csvw.header).value]
with get_content(location, result) as fo:
df = pandas.read_excel(fo, encoding='utf-8', **args)
return df
def read_xml(location, result):
validate_dtd = False
if result[RDF.type:setl.DTDValidatedXML]:
validate_dtd = True
f = iterparse_filter.IterParseFilter(validate_dtd=validate_dtd)
if result.value(setl.xpath) is None:
logger.debug("no xpath to select on from %s", location)
f.iter_end("/*")
for xp in result[setl.xpath]:
f.iter_end(xp.value)
with get_content(location, result) as fo:
for (i, (event, ele)) in enumerate(f.iterparse(fo)):
yield i, ele
def read_json(location, result):
selector = result.value(api_vocab.selector)
if selector is not None:
selector = selector.value
else:
selector = ""
with get_content(location, result) as fo:
yield from enumerate(ijson.items(fo, selector))
extractors = {
setl.XPORT : lambda location, result: pandas.read_sas(get_content(location, result), format='xport'),
setl.SAS7BDAT : lambda location, result: pandas.read_sas(get_content(location, result), format='sas7bdat'),
setl.Excel : read_excel,
csvw.Table : read_csv,
OWL.Ontology : read_graph,
void.Dataset : read_graph,
setl.JSON : read_json,
setl.XML : read_xml,
URIRef("https://www.iana.org/assignments/media-types/text/plain") : lambda location, result: get_content(location, result)
}
try:
from bs4 import BeautifulSoup
extractors[setl.HTML] = lambda location, result: BeautifulSoup(get_content(location, result).read(), 'html.parser')
except Exception as e:
pass
def load_csv(csv_resource):
column_descriptions = {}
for col in csv_resource[csvw.column]:
label = col.value(RDFS.label).value
column_descriptions[label] = col
csv_graph = Graph(identifier=csv_resource)
s = [x for x in csv.reader(open(str(csv_resource.value(csvw.url).identifier).replace("file://","")),
delimiter=str(csv_resource.value(csvw.delimiter,default=",").value),
quotechar=str(csv_resource.value(csvw.quoteChar,default='"').value))]
header = None
properties = []
propertyMap = {}
skip_value = csv_resource.value(csvw.null)
if skip_value is not None:
skip_value = skip_value.value
for i, r in enumerate(s):
if header is None:
header = r
for j, h in enumerate(header):
col_desc = None
if h in column_descriptions:
col_desc = column_descriptions[h]
col = csv_graph.resource(URIRef("urn:col_"+str(h)))
col.add(RDFS.label, Literal(h))
col.add(ov.csvCol, Literal(j))
if col_desc is not None:
col.add(RDFS.range, col_desc.value(RDFS.range, default=XSD.string))
properties.append(col)
propertyMap[h] = col
continue
res = csv_graph.resource(csv_resource.identifier+"_row_"+str(i))
res.add(RDF.type, csvw.Row)
res.add(csvw.rownum, Literal(i))
for j, value in enumerate(r):
if skip_value is not None and skip_value == value:
continue
#print i, j, value
prop = properties[j]
datatype = prop.value(RDFS['range'], default=XSD.string)
lit = Literal(value, datatype=datatype.identifier)
#print i, prop.identifier, lit.n3()
res.add(prop.identifier, lit)
logger.debug("Table has %s rows, %s columns, and %s triples", len(s), len(header), len(csv_graph))
return csv_graph
formats = {
None:'xml',
"application/rdf+xml":'xml',
"text/rdf":'xml',
'text/turtle':'turtle',
'application/turtle':'turtle',
'application/x-turtle':'turtle',
'text/plain':'nt',
'text/n3':'n3',
'application/trig':'trig',
'application/json':'json-ld'
}
def create_python_function(f, resources):
global_vars = {'this' : f, 'resources': resources}
local_vars = {}
script = f.value(prov.value)
for qd in f[prov.qualifiedDerivation]:
entity = resources[qd.value(prov.entity).identifier]
name = qd.value(prov.hadRole).value(dc.identifier)
local_vars[name.value] = entity
exec(script.value, local_vars, global_vars)
resources[f.identifier] = global_vars['result']
def get_order(setl_graph):
nodes = collections.defaultdict(set)
for typ in actions:
for task in setl_graph.subjects(RDF.type, typ):
task = setl_graph.resource(task)
for used in task[prov.used]:
nodes[task.identifier].add(used.identifier)
for usage in task[prov.qualifiedUsage]:
used = usage.value(prov.entity)
nodes[task.identifier].add(used.identifier)
for generated in task.subjects(prov.wasGeneratedBy):
nodes[generated.identifier].add(task.identifier)
for derivation in task[prov.qualifiedDerivation]:
derived = derivation.value(prov.entity)
nodes[task.identifier].add(derived.identifier)
return toposort_flatten(nodes)
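# Illustrative example: for a transform T that prov:used an extract E, with an
# output G that prov:wasGeneratedBy T, the dependency map is roughly
# {T: {E}, G: {T}}, so toposort_flatten yields an order such as [E, T, G].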
def extract(e, resources):
logger.info('Extracting %s',e.identifier)
used = e.value(prov.used)
for result in e.subjects(prov.wasGeneratedBy):
if used is None:
used = result
for t in result[RDF.type]:
# Do we know how to generate this?
if t.identifier in extractors:
logger.info("Extracted %s", used.identifier)
resources[result.identifier] = extractors[t.identifier](used.identifier, result)
return resources[result.identifier]
def isempty(value):
try:
return isnan(value)
except:
return value is None
def clone(value):
    """JSON-level cloning only: lists and dicts are shallow-copied; atomic values are immutable and returned as-is."""
if isinstance(value, list):
return [x for x in value]
elif isinstance(value, dict):
return dict(value)
else:
return value
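# Illustrative example: clone({'a': [1]}) returns a new top-level dict, but the
# nested list is shared; only the outermost container is copied on each call.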
functions = {}
def get_function(expr, local_keys):
key = tuple([expr]+sorted(local_keys))
if key not in functions:
script = '''lambda %s: %s'''% (', '.join(sorted(local_keys)), expr)
fn = eval(script)
fn.__name__ = expr.encode("ascii", "ignore").decode('utf8')
functions[key] = fn
return functions[key]
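# Illustrative example: get_function("row['id'] + 1", ['row', 'name']) compiles and
# caches "lambda name, row: row['id'] + 1" (parameter names are sorted), so repeated
# calls with the same expression and local names reuse the cached callable.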
templates = {}
def get_template(templ):
if templ not in templates:
t = Template(templ)
templates[templ] = t
return templates[templ]
def process_row(row, template, rowname, table, resources, transform, variables):
result = []
e = {'row':row,
'name': rowname,
'table': table,
'resources': resources,
'template': template,
"transform": transform,
"setl_graph": transform.graph,
"isempty":isempty,
"slugify" : slugify,
"camelcase" : camelcase,
"hash":hash,
"isinstance":isinstance,
"str":str,
"float":float,
"int":int,
"chain": lambda x: chain(*x),
"list":list
}
e.update(variables)
e.update(rdflib.__dict__)
todo = [[x, result, e] for x in template]
while len(todo) > 0:
task, parent, env = todo.pop()
key = None
value = task
this = None
if isinstance(parent, dict):
if len(task) != 2:
logger.debug(task)
key, value = task
kt = get_template(key)
key = kt.render(**env)
if isinstance(value, dict):
if '@if' in value:
try:
fn = get_function(value['@if'], list(env.keys()))
incl = fn(**env)
if incl is None or not incl:
continue
except KeyError:
continue
except AttributeError:
continue
except TypeError:
continue
except Exception as e:
trace = sys.exc_info()[2]
logger.error("Error in conditional %s\nRelevant Environment:", value['@if'])
for key, v in list(env.items()):
#if key in value['@if']:
if hasattr(v, 'findall'):
v = xml.etree.ElementTree.tostring(v)
logger.error(key + "\t" + str(v)[:1000])
raise e
if '@for' in value:
f = value['@for']
if isinstance(f, list):
f = ' '.join(f)
variable_list, expression = f.split(" in ", 1)
variable_list = re.split(',\s+', variable_list.strip())
val = value
if '@do' in value:
val = value['@do']
else:
del val['@for']
try:
fn = get_function(expression, list(env.keys()))
values = fn(**env)
if values is not None:
for v in values:
if len(variable_list) == 1:
v = [v]
new_env = dict(env)
for i, variable in enumerate(variable_list):
new_env[variable] = v[i]
child = clone(val)
todo.append((child, parent, new_env))
except KeyError:
pass
except Exception as e:
trace = sys.exc_info()[2]
logger.error("Error in @for: %s", value['@for'])
logger.error("Locals: %s", list(env.keys()))
raise e
continue
if '@with' in value:
f = value['@with']
if isinstance(f, list):
f = ' '.join(f)
expression, variable_list = f.split(" as ", 1)
variable_list = re.split(',\s+', variable_list.strip())
val = value
if '@do' in value:
val = value['@do']
else:
del val['@with']
try:
fn = get_function(expression, list(env.keys()))
v = fn(**env)
if v is not None:
if len(variable_list) == 1:
v = [v]
new_env = dict(env)
for i, variable in enumerate(variable_list):
new_env[variable] = v[i]
child = clone(val)
todo.append((child, parent, new_env))
except KeyError:
pass
except Exception as e:
trace = sys.exc_info()[2]
logger.error("Error in with: %s", value['@with'])
logger.error("Locals: %s", list(env.keys()))
raise e
continue
this = {}
for child in list(value.items()):
if child[0] == '@if':
continue
if child[0] == '@for':
continue
todo.append((child, this, env))
elif isinstance(value, list):
this = []
for child in value:
todo.append((child, this, env))
elif isinstance(value, str):
try:
template = get_template(str(value))
this = template.render(**env)
except Exception as e:
trace = sys.exc_info()[2]
logger.error("Error in template %s %s", value, type(value))
logger.error("Relevant Environment:")
for key, v in list(env.items()):
#if key in value:
if hasattr(v, 'findall'):
v = xml.etree.ElementTree.tostring(v)
logger.error(key + "\t" + str(v)[:1000])
raise e
else:
this = value
if key is not None:
parent[key] = this
else:
parent.append(this)
return result
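# Illustrative example (hypothetical template): given a row with a 'name' column,
# a JSON-LD template fragment such as
#   [{"@id": "http://example.com/{{row['name']}}",
#     "@if": "not isempty(row['name'])",
#     "http://www.w3.org/2000/01/rdf-schema#label": "{{row['name']}}"}]
# is expanded per row: string values are rendered with Jinja2 (the row is in
# scope), '@if' gates inclusion of the node, and '@for'/'@with' bind variables.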
def json_transform(transform, resources):
logger.info("Transforming %s", transform.identifier)
tables = [u for u in transform[prov.used]]
variables = {}
for usage in transform[prov.qualifiedUsage]:
used = usage.value(prov.entity)
role = usage.value(prov.hadRole)
roleID = role.value(dc.identifier)
variables[roleID.value] = resources[used.identifier]
#print "Using", used.identifier, "as", roleID.value
generated = list(transform.subjects(prov.wasGeneratedBy))[0]
logger.info("Generating %s", generated.identifier)
if generated.identifier in resources:
result = resources[generated.identifier]
else:
result = ConjunctiveGraph()
if generated[RDF.type : setl.Persisted]:
result = ConjunctiveGraph(store="Sleepycat")
if generated[RDF.type : setl.Persisted]:
tempdir = tempfile.mkdtemp()
logger.info("Persisting %s to %s", generated.identifier, tempdir)
result.store.open(tempdir, True)
s = transform.value(prov.value).value
try:
jslt = json.loads(s)
except Exception as e:
trace = sys.exc_info()[2]
        message = str(e)
        if 'No JSON object could be decoded' in message:
            logger.error(s)
        if 'line' in message:
            line = int(re.search("line ([0-9]+)", message).group(1))
            logger.error("Error in parsing JSON Template at line %d:", line)
            logger.error('\n'.join(["%d: %s"%(i+line-3, x) for i, x in enumerate(s.split("\n")[line-3:line+4])]))
raise e
context = transform.value(setl.hasContext)
if context is not None:
context = json.loads(context.value)
for t in tables:
logger.info("Using %s", t.identifier)
table = resources[t.identifier]
it = table
if isinstance(table, pandas.DataFrame):
#if run_samples:
# table = table.head()
it = table.iterrows()
logger.info("Transforming %s rows.", len(table.index))
else:
logger.info("Transforming %s", t.identifier)
for rowname, row in it:
if run_samples and rowname >= 100:
break
try:
root = None
data = None
root = {
"@id": generated.identifier,
"@graph": process_row(row, jslt, rowname, table, resources, transform, variables)
}
if context is not None:
root['@context'] = context
before = len(result)
#graph = ConjunctiveGraph(identifier=generated.identifier)
#graph.parse(data=json.dumps(root),format="json-ld")
data = json.dumps(root)
#del root
result.parse(data=data, format="json-ld")
#del data
after = len(result)
logger.debug("Row "+str(rowname)+" added "+str(after-before)+" triples.")
sys.stdout.flush()
except Exception as e:
trace = sys.exc_info()[2]
if data is not None:
logger.error("Error parsing tree: %s", data)
if isinstance(table, pandas.DataFrame):
logger.error("Error on %s %s", rowname, row)
else:
logger.error("Error on %s", rowname)
raise e
resources[generated.identifier] = result
def transform(transform_resource, resources):
logger.info('Transforming %s',transform_resource.identifier)
transform_graph = ConjunctiveGraph()
    for result in transform_resource.subjects(prov.wasGeneratedBy):
        transform_graph = ConjunctiveGraph(identifier=result.identifier)
used = set(transform_resource[prov.used])
for csv in [u for u in used if u[RDF.type:csvw.Table]]:
csv_graph = Graph(store=transform_graph.store, identifier=csv)
csv_graph += graphs[csv.identifier]
for script in [u for u in used if u[RDF.type:setl.PythonScript]]:
logger.info("Script: %s", script.identifier)
s = script.value(prov.value).value
l = dict(graph = transform_graph, setl_graph = transform_resource.graph)
gl = dict()
exec(s, gl, l)
for jsldt in [u for u in used if u[RDF.type:setl.PythonScript]]:
logger.info("Script: %s", script.identifier)
s = script.value(prov.value).value
l = dict(graph = transform_graph, setl_graph = transform_resource.graph)
gl = dict()
exec(s, gl, l)
for update in [u for u in used if u[RDF.type:sp.Update]]:
logger.info("Update: %s", update.identifier)
query = update.value(prov.value).value
transform_graph.update(query)
for construct in [u for u in used if u[RDF.type:sp.Construct]]:
logger.info("Construct: %s", construct.identifier)
query = construct.value(prov.value).value
g = transform_graph.query(query)
transform_graph += g
for csv in [u for u in used if u[RDF.type:csvw.Table]]:
g = Graph(identifier=csv.identifier,store=transform_graph.store)
g.remove((None, None, None))
transform_graph.store.remove_graph(csv.identifier)
    for result in transform_resource.subjects(prov.wasGeneratedBy):
graphs[result.identifier] = transform_graph
def _load_open(generated):
if generated.identifier.startswith("file://"):
if os.name == 'nt': # skip the initial
filename = generated.identifier.replace('file:///','').replace('file://','')
else:
filename = generated.identifier.replace('file://','')
fh = open(filename, 'wb')
for type, pack in packers.items():
if generated[RDF.type : type]:
return pack(fh)
return fh
def load(load_resource, resources):
logger.info('Loading %s',load_resource.identifier)
file_graph = Dataset(default_union=True)
to_disk = False
for used in load_resource[prov.used]:
if used[RDF.type : setl.Persisted]:
to_disk = True
file_graph = Dataset(store='Sleepycat', default_union=True)
tempdir = tempfile.mkdtemp()
logger.debug("Gathering %s into %s", load_resource.identifier, tempdir)
file_graph.store.open(tempdir, True)
break
if len(list(load_resource[prov.used])) == 1:
logger.info("Using %s",load_resource.value(prov.used).identifier)
file_graph = resources[load_resource.value(prov.used).identifier]
else:
for used in load_resource[prov.used]:
logger.info("Using %s",used.identifier)
used_graph = resources[used.identifier]
file_graph.namespace_manager = used_graph.namespace_manager
#print used_graph.serialize(format="trig")
file_graph.addN(used_graph.quads())
for generated in load_resource.subjects(prov.wasGeneratedBy):
# TODO: support LDP-based loading
if generated[RDF.type:pv.File]:
fmt = generated.value(dc['format'])
if fmt is not None:
fmt = fmt.value
if fmt in formats:
fmt = formats[fmt]
#print fmt
with _load_open(generated) as o:
o.write(file_graph.serialize(format=fmt))
o.close()
elif generated[RDF.type:sd.Service]:
from rdflib.plugins.stores.sparqlstore import SPARQLUpdateStore
endpoint = generated.value(sd.endpoint, default=generated).identifier
store = SPARQLUpdateStore(endpoint, endpoint, autocommit=False)
endpoint_graph = Dataset(store=store, identifier=generated.identifier, default_union=True)
endpoint_graph.addN(file_graph.quads())
endpoint_graph.commit()
#if to_disk:
# file_graph.close()
actions = {
setl.Extract : extract,
setl.Transform : json_transform,
setl.Load : load,
setl.PythonScript : create_python_function,
setl.IsEmpty : isempty
}
def _setl(setl_graph):
global logger
if logger is None:
logger = logging.getLogger(__name__)
resources = {}
resources.update(actions)
tasks = [setl_graph.resource(t) for t in get_order(setl_graph)]
for task in tasks:
action = [actions[t.identifier] for t in task[RDF.type] if t.identifier in actions]
if len(action) > 0:
action[0](task, resources)
return resources
logger = None
def main():
args = sys.argv[1:]
logging_level = logging.DEBUG
if '-q' in args or '--quiet' in args:
logging_level = logging.WARNING
logging.basicConfig(level=logging_level)
global logger
logger = logging.getLogger(__name__)
global run_samples
setl_file = args[0]
if 'sample' in args:
run_samples = True
logger.warning("Only processing a few sample rows.")
setl_graph = ConjunctiveGraph()
content = open(setl_file).read()
setl_graph.parse(data=content, format="turtle")
graphs = _setl(setl_graph)
# print "Finished processing"
# return graphs
if __name__ == '__main__':
result = main()
logger.info("Exiting")
| apache-2.0 |
mgeplf/NeuroM | neurom/view/tests/test_common.py | 5 | 7452 | # Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .utils import get_fig_2d, get_fig_3d # needs to be at top to trigger matplotlib Agg backend
import os
from nose import tools as nt
import shutil
import tempfile
import numpy as np
from neurom.view.common import (plt, figure_naming, get_figure, save_plot, plot_style,
plot_title, plot_labels, plot_legend, update_plot_limits, plot_ticks,
plot_sphere, plot_cylinder)
def test_figure_naming():
pretitle, posttitle, prefile, postfile = figure_naming(pretitle='Test', prefile="", postfile=3)
nt.eq_(pretitle, 'Test -- ')
nt.eq_(posttitle, "")
nt.eq_(prefile, "")
nt.eq_(postfile, "_3")
pretitle, posttitle, prefile, postfile = figure_naming(pretitle='', posttitle="Test", prefile="test", postfile="")
nt.eq_(pretitle, "")
nt.eq_(posttitle, " -- Test")
nt.eq_(prefile, "test_")
nt.eq_(postfile, "")
def test_get_figure():
fig_old = plt.figure()
fig, ax = get_figure(new_fig=False)
nt.eq_(fig, fig_old)
nt.eq_(ax.colNum, 0)
nt.eq_(ax.rowNum, 0)
fig1, ax1 = get_figure(new_fig=True, subplot=224)
nt.ok_(fig1 != fig_old)
nt.eq_(ax1.colNum, 1)
nt.eq_(ax1.rowNum, 1)
fig2, ax2 = get_figure(new_fig=True, subplot=[1, 1, 1])
nt.eq_(ax2.colNum, 0)
nt.eq_(ax2.rowNum, 0)
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
fig2, ax2 = get_figure(new_fig=False)
nt.eq_(fig2, plt.gcf())
nt.eq_(ax2, plt.gca())
plt.close('all')
def test_save_plot():
fig_name = 'Figure.png'
tempdir = tempfile.mkdtemp('test_common')
try:
old_dir = os.getcwd()
os.chdir(tempdir)
fig_old = plt.figure()
fig = save_plot(fig_old)
nt.ok_(os.path.isfile(fig_name))
os.remove(fig_name)
fig = save_plot(fig_old, output_path='subdir')
nt.ok_(os.path.isfile(os.path.join(tempdir, 'subdir', fig_name)))
finally:
os.chdir(old_dir)
shutil.rmtree(tempdir)
plt.close('all')
def test_plot_title():
with get_fig_2d() as (fig, ax):
plot_title(ax)
nt.eq_(ax.get_title(), 'Figure')
with get_fig_2d() as (fig, ax):
plot_title(ax, title='Test')
nt.eq_(ax.get_title(), 'Test')
def test_plot_labels():
with get_fig_2d() as (fig, ax):
plot_labels(ax)
nt.eq_(ax.get_xlabel(), 'X')
nt.eq_(ax.get_ylabel(), 'Y')
with get_fig_2d() as (fig, ax):
plot_labels(ax, xlabel='T', ylabel='R')
nt.eq_(ax.get_xlabel(), 'T')
nt.eq_(ax.get_ylabel(), 'R')
with get_fig_3d() as (fig0, ax0):
plot_labels(ax0)
nt.eq_(ax0.get_zlabel(), 'Z')
with get_fig_3d() as (fig0, ax0):
plot_labels(ax0, zlabel='T')
nt.eq_(ax0.get_zlabel(), 'T')
def test_plot_legend():
with get_fig_2d() as (fig, ax):
plot_legend(ax)
legend = ax.get_legend()
nt.ok_(legend is None)
with get_fig_2d() as (fig, ax):
ax.plot([1, 2, 3], [1, 2, 3], label='line 1')
plot_legend(ax, no_legend=False)
legend = ax.get_legend()
nt.eq_(legend.get_texts()[0].get_text(), 'line 1')
def test_plot_limits():
with get_fig_2d() as (fig, ax):
nt.assert_raises(AssertionError, update_plot_limits, ax, white_space=0)
with get_fig_2d() as (fig, ax):
ax.dataLim.update_from_data_xy(((0, -100), (100, 0)))
update_plot_limits(ax, white_space=0)
nt.eq_(ax.get_xlim(), (0, 100))
nt.eq_(ax.get_ylim(), (-100, 0))
with get_fig_3d() as (fig0, ax0):
update_plot_limits(ax0, white_space=0)
zlim0 = ax0.get_zlim()
nt.ok_(np.allclose(ax0.get_zlim(), zlim0))
def test_plot_ticks():
with get_fig_2d() as (fig, ax):
plot_ticks(ax)
nt.ok_(len(ax.get_xticks()))
nt.ok_(len(ax.get_yticks()))
with get_fig_2d() as (fig, ax):
plot_ticks(ax, xticks=[], yticks=[])
nt.eq_(len(ax.get_xticks()), 0)
nt.eq_(len(ax.get_yticks()), 0)
with get_fig_2d() as (fig, ax):
plot_ticks(ax, xticks=np.arange(3), yticks=np.arange(4))
nt.eq_(len(ax.get_xticks()), 3)
nt.eq_(len(ax.get_yticks()), 4)
with get_fig_3d() as (fig0, ax0):
plot_ticks(ax0)
nt.ok_(len(ax0.get_zticks()))
with get_fig_3d() as (fig0, ax0):
plot_ticks(ax0, zticks=[])
nt.eq_(len(ax0.get_zticks()), 0)
with get_fig_3d() as (fig0, ax0):
plot_ticks(ax0, zticks=np.arange(3))
nt.eq_(len(ax0.get_zticks()), 3)
def test_plot_style():
with get_fig_2d() as (fig, ax):
ax.dataLim.update_from_data_xy(((0, -100), (100, 0)))
plot_style(fig, ax)
nt.eq_(ax.get_title(), 'Figure')
nt.eq_(ax.get_xlabel(), 'X')
nt.eq_(ax.get_ylabel(), 'Y')
with get_fig_2d() as (fig, ax):
ax.dataLim.update_from_data_xy(((0, -100), (100, 0)))
plot_style(fig, ax, no_axes=True)
nt.ok_(not ax.get_frame_on())
nt.ok_(not ax.xaxis.get_visible())
nt.ok_(not ax.yaxis.get_visible())
with get_fig_2d() as (fig, ax):
ax.dataLim.update_from_data_xy(((0, -100), (100, 0)))
plot_style(fig, ax, tight=True)
nt.ok_(fig.get_tight_layout())
def test_plot_cylinder():
fig0, ax0 = get_figure(params={'projection': '3d'})
start, end = np.array([0, 0, 0]), np.array([1, 0, 0])
plot_cylinder(ax0, start=start, end=end,
start_radius=0, end_radius=10.,
color='black', alpha=1.)
nt.ok_(ax0.has_data())
def test_plot_sphere():
fig0, ax0 = get_figure(params={'projection': '3d'})
plot_sphere(ax0, [0, 0, 0], 10., color='black', alpha=1.)
nt.ok_(ax0.has_data())
| bsd-3-clause |
mjgrav2001/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
sonnyhu/scikit-learn | sklearn/datasets/species_distributions.py | 9 | 7865 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.datasets.base import _pkl_filepath
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = DIRECTORY_URL + "samples.zip"
COVERAGES_URL = DIRECTORY_URL + "coverages.zip"
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = int(header[b'NODATA_value'])
if nodata != -9999:
        M[M == nodata] = -9999  # map the declared nodata marker onto -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
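# Illustrative usage:
#   data = fetch_species_distributions()
#   xgrid, ygrid = construct_grids(data)
# xgrid and ygrid are the longitude and latitude axes (in degrees) of the
# coverage grid returned by fetch_species_distributions below.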
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in
      Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
| bsd-3-clause |
Capepy/scipy_2015_sklearn_tutorial | notebooks/figures/plot_rbf_svm_parameters.py | 19 | 2018 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.svm import SVC
from sklearn.datasets import make_blobs
from .plot_2d_separator import plot_2d_separator
def make_handcrafted_dataset():
# a carefully hand-designed dataset lol
X, y = make_blobs(centers=2, random_state=4, n_samples=30)
y[np.array([7, 27])] = 0
mask = np.ones(len(X), dtype=np.bool)
mask[np.array([0, 1, 5, 26])] = 0
X, y = X[mask], y[mask]
return X, y
def plot_rbf_svm_parameters():
X, y = make_handcrafted_dataset()
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
for ax, C in zip(axes, [1e0, 5, 10, 100]):
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
svm = SVC(kernel='rbf', C=C).fit(X, y)
plot_2d_separator(svm, X, ax=ax, eps=.5)
ax.set_title("C = %f" % C)
fig, axes = plt.subplots(1, 4, figsize=(15, 3))
for ax, gamma in zip(axes, [0.1, .5, 1, 10]):
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
svm = SVC(gamma=gamma, kernel='rbf', C=1).fit(X, y)
plot_2d_separator(svm, X, ax=ax, eps=.5)
ax.set_title("gamma = %f" % gamma)
def plot_svm(log_C, log_gamma):
X, y = make_handcrafted_dataset()
C = 10. ** log_C
gamma = 10. ** log_gamma
svm = SVC(kernel='rbf', C=C, gamma=gamma).fit(X, y)
ax = plt.gca()
plot_2d_separator(svm, X, ax=ax, eps=.5)
# plot data
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
# plot support vectors
sv = svm.support_vectors_
ax.scatter(sv[:, 0], sv[:, 1], s=230, facecolors='none', zorder=10, linewidth=3)
ax.set_title("C = %.4f gamma = %.4f" % (C, gamma))
def plot_svm_interactive():
from IPython.html.widgets import interactive, FloatSlider
C_slider = FloatSlider(min=-3, max=3, step=.1, value=0, readout=False)
gamma_slider = FloatSlider(min=-2, max=2, step=.1, value=0, readout=False)
return interactive(plot_svm, log_C=C_slider, log_gamma=gamma_slider)
| cc0-1.0 |
rohit21122012/DCASE2013 | runs/2016/dnn2016med_mfcc_5/src/evaluation.py | 56 | 43426 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import numpy
import sys
from sklearn import metrics
class DCASE2016_SceneClassification_Metrics():
"""DCASE 2016 scene classification metrics
Examples
--------
>>> dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> y_true = []
>>> y_pred = []
>>> for result in results:
>>> y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
>>> y_pred.append(result[1])
>>>
>>> dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
>>>
>>> results = dcase2016_scene_metric.results()
"""
def __init__(self, class_list):
"""__init__ method.
Parameters
----------
class_list : list
Evaluated scene labels in the list
"""
self.accuracies_per_class = None
self.Nsys = None
self.Nref = None
self.class_list = class_list
self.eps = numpy.spacing(1)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
return self.results()
def accuracies(self, y_true, y_pred, labels):
"""Calculate accuracy
Parameters
----------
y_true : numpy.array
Ground truth array, list of scene labels
y_pred : numpy.array
System output array, list of scene labels
labels : list
list of scene labels
Returns
-------
array : numpy.array [shape=(number of scene labels,)]
Accuracy per scene label class
"""
confusion_matrix = metrics.confusion_matrix(y_true=y_true, y_pred=y_pred, labels=labels).astype(float)
return numpy.divide(numpy.diag(confusion_matrix), numpy.sum(confusion_matrix, 1) + self.eps)
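    # Worked example (hypothetical labels): with class_list ['home', 'office'],
    # y_true = ['home', 'home', 'office'] and y_pred = ['home', 'office', 'office']
    # give the confusion matrix [[1, 1], [0, 1]], so the per-class accuracies
    # (diagonal divided by row sums) are approximately [0.5, 1.0].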
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
accuracies_per_class = self.accuracies(y_pred=system_output, y_true=annotated_ground_truth,
labels=self.class_list)
if self.accuracies_per_class is None:
self.accuracies_per_class = accuracies_per_class
else:
self.accuracies_per_class = numpy.vstack((self.accuracies_per_class, accuracies_per_class))
Nref = numpy.zeros(len(self.class_list))
Nsys = numpy.zeros(len(self.class_list))
for class_id, class_label in enumerate(self.class_list):
for item in system_output:
if item == class_label:
Nsys[class_id] += 1
for item in annotated_ground_truth:
if item == class_label:
Nref[class_id] += 1
if self.Nref is None:
self.Nref = Nref
else:
self.Nref = numpy.vstack((self.Nref, Nref))
if self.Nsys is None:
self.Nsys = Nsys
else:
self.Nsys = numpy.vstack((self.Nsys, Nsys))
def results(self):
"""Get results
Outputs results in dict, format:
{
'class_wise_data':
{
'office': {
'Nsys': 10,
'Nref': 7,
},
}
'class_wise_accuracy':
{
'office': 0.6,
'home': 0.4,
}
'overall_accuracy': numpy.mean(self.accuracies_per_class)
'Nsys': 100,
'Nref': 100,
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {
'class_wise_data': {},
'class_wise_accuracy': {},
'overall_accuracy': numpy.mean(self.accuracies_per_class)
}
if len(self.Nsys.shape) == 2:
results['Nsys'] = int(sum(sum(self.Nsys)))
results['Nref'] = int(sum(sum(self.Nref)))
else:
results['Nsys'] = int(sum(self.Nsys))
results['Nref'] = int(sum(self.Nref))
for class_id, class_label in enumerate(self.class_list):
if len(self.accuracies_per_class.shape) == 2:
results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[:, class_id])
results['class_wise_data'][class_label] = {
'Nsys': int(sum(self.Nsys[:, class_id])),
'Nref': int(sum(self.Nref[:, class_id])),
}
else:
results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[class_id])
results['class_wise_data'][class_label] = {
'Nsys': int(self.Nsys[class_id]),
'Nref': int(self.Nref[class_id]),
}
return results
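# A minimal, hypothetical usage sketch for the scene metric above; the label
# lists are invented for illustration and do not come from any DCASE dataset.
def _example_scene_metric_usage():
    scene_metric = DCASE2016_SceneClassification_Metrics(class_list=['office', 'home'])
    y_true = ['office', 'office', 'home', 'home']
    y_pred = ['office', 'home', 'home', 'home']
    scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
    # results() reports overall_accuracy as the mean of the class-wise
    # accuracies, here (1/2 + 2/2) / 2 = 0.75 (up to the eps regularisation).
    return scene_metric.results()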
class EventDetectionMetrics(object):
"""Baseclass for sound event metric classes.
"""
def __init__(self, class_list):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
"""
self.class_list = class_list
self.eps = numpy.spacing(1)
def max_event_offset(self, data):
"""Get maximum event offset from event list
Parameters
----------
data : list
Event list, list of event dicts
Returns
-------
max : float > 0
Maximum event offset
"""
        max_offset = 0
        for event in data:
            if event['event_offset'] > max_offset:
                max_offset = event['event_offset']
        return max_offset
def list_to_roll(self, data, time_resolution=0.01):
"""Convert event list into event roll.
        The event roll is a binary matrix indicating event activity within time segments defined by time_resolution.
Parameters
----------
data : list
Event list, list of event dicts
time_resolution : float > 0
Time resolution used when converting event into event roll.
Returns
-------
event_roll : numpy.ndarray [shape=(math.ceil(data_length * 1 / time_resolution) + 1, amount of classes)]
Event roll
"""
# Initialize
data_length = self.max_event_offset(data)
event_roll = numpy.zeros((math.ceil(data_length * 1 / time_resolution) + 1, len(self.class_list)))
# Fill-in event_roll
for event in data:
pos = self.class_list.index(event['event_label'].rstrip())
onset = math.floor(event['event_onset'] * 1 / time_resolution)
offset = math.ceil(event['event_offset'] * 1 / time_resolution) + 1
event_roll[onset:offset, pos] = 1
return event_roll
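# A small illustrative sketch of list_to_roll(); the two events below are
# invented for demonstration only.
def _example_event_roll():
    event_metrics = EventDetectionMetrics(class_list=['speech', 'door'])
    events = [
        {'event_label': 'speech', 'event_onset': 0.0, 'event_offset': 1.2},
        {'event_label': 'door', 'event_onset': 0.5, 'event_offset': 0.9},
    ]
    # With time_resolution=0.5 the roll spans ceil(1.2 / 0.5) + 1 = 4 segments,
    # with one column per class; active (segment, class) cells are set to 1.
    return event_metrics.list_to_roll(events, time_resolution=0.5)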
class DCASE2016_EventDetection_SegmentBasedMetrics(EventDetectionMetrics):
"""DCASE2016 Segment based metrics for sound event detection
Supported metrics:
- Overall
- Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
- F-score (F1)
- Class-wise
- Error rate (ER), Insertions (I), Deletions (D)
- F-score (F1)
Examples
--------
>>> overall_metrics_per_scene = {}
>>> for scene_id, scene_label in enumerate(dataset.scene_labels):
>>> dcase2016_segment_based_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> for file_id, item in enumerate(dataset.test(fold,scene_label=scene_label)):
>>> current_file_results = []
>>> for result_line in results:
>>> if result_line[0] == dataset.absolute_to_relative(item['file']):
>>> current_file_results.append(
>>> {'file': result_line[0],
>>> 'event_onset': float(result_line[1]),
>>> 'event_offset': float(result_line[2]),
>>> 'event_label': result_line[3]
>>> }
>>> )
>>> meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
>>> dcase2016_segment_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
>>> overall_metrics_per_scene[scene_label]['segment_based_metrics'] = dcase2016_segment_based_metric.results()
"""
def __init__(self, class_list, time_resolution=1.0):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
time_resolution : float > 0
Time resolution used when converting event into event roll.
(Default value = 1.0)
"""
self.time_resolution = time_resolution
self.overall = {
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
'Nref': 0.0,
'Nsys': 0.0,
'ER': 0.0,
'S': 0.0,
'D': 0.0,
'I': 0.0,
}
self.class_wise = {}
for class_label in class_list:
self.class_wise[class_label] = {
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
'Nref': 0.0,
'Nsys': 0.0,
}
EventDetectionMetrics.__init__(self, class_list=class_list)
def __enter__(self):
# Initialize class and return it
return self
def __exit__(self, type, value, traceback):
# Finalize evaluation and return results
return self.results()
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
# Convert event list into frame-based representation
system_event_roll = self.list_to_roll(data=system_output, time_resolution=self.time_resolution)
annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=self.time_resolution)
# Fix durations of both event_rolls to be equal
if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
system_event_roll = numpy.vstack((system_event_roll, padding))
if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
# Compute segment-based overall metrics
for segment_id in range(0, annotated_event_roll.shape[0]):
annotated_segment = annotated_event_roll[segment_id, :]
system_segment = system_event_roll[segment_id, :]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
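            # Per-segment error decomposition: S counts substitutions (reference
            # and system are both active in this segment but under different
            # labels), D counts deletions (reference activity the system missed)
            # and I counts insertions (system activity with no reference).
            # For a single segment the ER term below equals S + D + I.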
S = min(Nref, Nsys) - Ntp
D = max(0, Nref - Nsys)
I = max(0, Nsys - Nref)
ER = max(Nref, Nsys) - Ntp
self.overall['Ntp'] += Ntp
self.overall['Ntn'] += Ntn
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['S'] += S
self.overall['D'] += D
self.overall['I'] += I
self.overall['ER'] += ER
for class_id, class_label in enumerate(self.class_list):
annotated_segment = annotated_event_roll[:, class_id]
system_segment = system_event_roll[:, class_id]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Ntn'] += Ntn
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
return self
def results(self):
"""Get results
Outputs results in dict, format:
{
'overall':
{
'Pre':
'Rec':
'F':
'ER':
'S':
'D':
'I':
}
'class_wise':
{
'office': {
'Pre':
'Rec':
'F':
'ER':
'D':
'I':
'Nref':
'Nsys':
'Ntp':
'Nfn':
'Nfp':
},
}
'class_wise_average':
{
'F':
'ER':
}
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {'overall': {},
'class_wise': {},
'class_wise_average': {},
}
# Overall metrics
results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (
results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
results['overall']['ER'] = self.overall['ER'] / self.overall['Nref']
results['overall']['S'] = self.overall['S'] / self.overall['Nref']
results['overall']['D'] = self.overall['D'] / self.overall['Nref']
results['overall']['I'] = self.overall['I'] / self.overall['Nref']
# Class-wise metrics
class_wise_F = []
class_wise_ER = []
for class_id, class_label in enumerate(self.class_list):
if class_label not in results['class_wise']:
results['class_wise'][class_label] = {}
results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nsys'] + self.eps)
results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['F'] = 2 * (
(results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (
results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] +
self.class_wise[class_label]['Nfp']) / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
class_wise_F.append(results['class_wise'][class_label]['F'])
class_wise_ER.append(results['class_wise'][class_label]['ER'])
results['class_wise_average']['F'] = numpy.mean(class_wise_F)
results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
return results
class DCASE2016_EventDetection_EventBasedMetrics(EventDetectionMetrics):
"""DCASE2016 Event based metrics for sound event detection
Supported metrics:
- Overall
- Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
- F-score (F1)
- Class-wise
- Error rate (ER), Insertions (I), Deletions (D)
- F-score (F1)
Examples
--------
>>> overall_metrics_per_scene = {}
>>> for scene_id, scene_label in enumerate(dataset.scene_labels):
>>> dcase2016_event_based_metric = DCASE2016_EventDetection_EventBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> for file_id, item in enumerate(dataset.test(fold,scene_label=scene_label)):
>>> current_file_results = []
>>> for result_line in results:
>>> if result_line[0] == dataset.absolute_to_relative(item['file']):
>>> current_file_results.append(
>>> {'file': result_line[0],
>>> 'event_onset': float(result_line[1]),
>>> 'event_offset': float(result_line[2]),
>>> 'event_label': result_line[3]
>>> }
>>> )
>>> meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
>>> dcase2016_event_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
>>> overall_metrics_per_scene[scene_label]['event_based_metrics'] = dcase2016_event_based_metric.results()
"""
def __init__(self, class_list, time_resolution=1.0, t_collar=0.2):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
time_resolution : float > 0
Time resolution used when converting event into event roll.
(Default value = 1.0)
t_collar : float > 0
Time collar for event onset and offset condition
(Default value = 0.2)
"""
self.time_resolution = time_resolution
self.t_collar = t_collar
self.overall = {
'Nref': 0.0,
'Nsys': 0.0,
'Nsubs': 0.0,
'Ntp': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
}
self.class_wise = {}
for class_label in class_list:
self.class_wise[class_label] = {
'Nref': 0.0,
'Nsys': 0.0,
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
}
EventDetectionMetrics.__init__(self, class_list=class_list)
def __enter__(self):
# Initialize class and return it
return self
def __exit__(self, type, value, traceback):
# Finalize evaluation and return results
return self.results()
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
# Overall metrics
# Total number of detected and reference events
Nsys = len(system_output)
Nref = len(annotated_ground_truth)
sys_correct = numpy.zeros(Nsys, dtype=bool)
ref_correct = numpy.zeros(Nref, dtype=bool)
# Number of correctly transcribed events, onset/offset within a t_collar range
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
label_condition = annotated_ground_truth[j]['event_label'] == system_output[i]['event_label']
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if label_condition and onset_condition and offset_condition:
ref_correct[j] = True
sys_correct[i] = True
break
Ntp = numpy.sum(sys_correct)
        sys_leftover = numpy.nonzero(numpy.logical_not(sys_correct))[0]
        ref_leftover = numpy.nonzero(numpy.logical_not(ref_correct))[0]
# Substitutions
Nsubs = 0
for j in ref_leftover:
for i in sys_leftover:
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if onset_condition and offset_condition:
Nsubs += 1
break
Nfp = Nsys - Ntp - Nsubs
Nfn = Nref - Ntp - Nsubs
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['Ntp'] += Ntp
self.overall['Nsubs'] += Nsubs
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
# Class-wise metrics
for class_id, class_label in enumerate(self.class_list):
Nref = 0.0
Nsys = 0.0
Ntp = 0.0
# Count event frequencies in the ground truth
for i in range(0, len(annotated_ground_truth)):
if annotated_ground_truth[i]['event_label'] == class_label:
Nref += 1
# Count event frequencies in the system output
for i in range(0, len(system_output)):
if system_output[i]['event_label'] == class_label:
Nsys += 1
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == class_label and system_output[i][
'event_label'] == class_label:
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if onset_condition and offset_condition:
Ntp += 1
break
Nfp = Nsys - Ntp
Nfn = Nref - Ntp
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
def onset_condition(self, annotated_event, system_event, t_collar=0.200):
"""Onset condition, checked does the event pair fulfill condition
Condition:
- event onsets are within t_collar each other
Parameters
----------
annotated_event : dict
Event dict
system_event : dict
Event dict
t_collar : float > 0
            Defines how close event onsets have to be in order to be considered a match. In seconds.
(Default value = 0.2)
Returns
-------
result : bool
Condition result
"""
return math.fabs(annotated_event['event_onset'] - system_event['event_onset']) <= t_collar
def offset_condition(self, annotated_event, system_event, t_collar=0.200, percentage_of_length=0.5):
"""Offset condition, checking does the event pair fulfill condition
Condition:
- event offsets are within t_collar each other
or
- system event offset is within the percentage_of_length*annotated event_length
Parameters
----------
annotated_event : dict
Event dict
system_event : dict
Event dict
t_collar : float > 0
            Defines how close event offsets have to be in order to be considered a match. In seconds.
            (Default value = 0.2)
        percentage_of_length : float [0-1]
            Fraction of the annotated event length used as an alternative offset tolerance.
            (Default value = 0.5)
Returns
-------
result : bool
Condition result
"""
annotated_length = annotated_event['event_offset'] - annotated_event['event_onset']
return math.fabs(annotated_event['event_offset'] - system_event['event_offset']) <= max(t_collar,
percentage_of_length * annotated_length)
def results(self):
"""Get results
Outputs results in dict, format:
{
'overall':
{
'Pre':
'Rec':
'F':
'ER':
'S':
'D':
'I':
}
'class_wise':
{
'office': {
'Pre':
'Rec':
'F':
'ER':
'D':
'I':
'Nref':
'Nsys':
'Ntp':
'Nfn':
'Nfp':
},
}
'class_wise_average':
{
'F':
'ER':
}
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {
'overall': {},
'class_wise': {},
'class_wise_average': {},
}
# Overall metrics
results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (
results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
results['overall']['ER'] = (self.overall['Nfn'] + self.overall['Nfp'] + self.overall['Nsubs']) / self.overall[
'Nref']
results['overall']['S'] = self.overall['Nsubs'] / self.overall['Nref']
results['overall']['D'] = self.overall['Nfn'] / self.overall['Nref']
results['overall']['I'] = self.overall['Nfp'] / self.overall['Nref']
# Class-wise metrics
class_wise_F = []
class_wise_ER = []
for class_label in self.class_list:
if class_label not in results['class_wise']:
results['class_wise'][class_label] = {}
results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nsys'] + self.eps)
results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['F'] = 2 * (
(results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (
results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] +
self.class_wise[class_label]['Nfp']) / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
class_wise_F.append(results['class_wise'][class_label]['F'])
class_wise_ER.append(results['class_wise'][class_label]['ER'])
# Class-wise average
results['class_wise_average']['F'] = numpy.mean(class_wise_F)
results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
return results
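# A minimal, hypothetical usage sketch for the event-based metric above; the
# two event lists are invented and do not come from any dataset.
def _example_event_based_usage():
    event_metric = DCASE2016_EventDetection_EventBasedMetrics(class_list=['speech', 'door'])
    reference = [{'event_label': 'speech', 'event_onset': 0.0, 'event_offset': 1.0}]
    # The system onset is 0.1 s late, inside the default 0.2 s collar, and the
    # offset matches exactly, so the event is counted as a true positive.
    output = [{'event_label': 'speech', 'event_onset': 0.1, 'event_offset': 1.0}]
    event_metric.evaluate(annotated_ground_truth=reference, system_output=output)
    return event_metric.results()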
class DCASE2013_EventDetection_Metrics(EventDetectionMetrics):
"""Lecagy DCASE2013 metrics, converted from the provided Matlab implementation
Supported metrics:
- Frame based
- F-score (F)
- AEER
- Event based
- Onset
- F-Score (F)
- AEER
- Onset-offset
- F-Score (F)
- AEER
- Class based
- Onset
- F-Score (F)
- AEER
- Onset-offset
- F-Score (F)
- AEER
"""
#
def frame_based(self, annotated_ground_truth, system_output, resolution=0.01):
# Convert event list into frame-based representation
system_event_roll = self.list_to_roll(data=system_output, time_resolution=resolution)
annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=resolution)
# Fix durations of both event_rolls to be equal
if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
system_event_roll = numpy.vstack((system_event_roll, padding))
if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
# Compute frame-based metrics
Nref = sum(sum(annotated_event_roll))
Ntot = sum(sum(system_event_roll))
Ntp = sum(sum(system_event_roll + annotated_event_roll > 1))
Nfp = sum(sum(system_event_roll - annotated_event_roll > 0))
Nfn = sum(sum(annotated_event_roll - system_event_roll > 0))
Nsubs = min(Nfp, Nfn)
eps = numpy.spacing(1)
results = dict()
results['Rec'] = Ntp / (Nref + eps)
results['Pre'] = Ntp / (Ntot + eps)
results['F'] = 2 * ((results['Pre'] * results['Rec']) / (results['Pre'] + results['Rec'] + eps))
results['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
return results
def event_based(self, annotated_ground_truth, system_output):
# Event-based evaluation for event detection task
# outputFile: the output of the event detection system
# GTFile: the ground truth list of events
# Total number of detected and reference events
Ntot = len(system_output)
Nref = len(annotated_ground_truth)
# Number of correctly transcribed events, onset within a +/-100 ms range
Ncorr = 0
NcorrOff = 0
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
math.fabs(annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
Ncorr += 1
# If offset within a +/-100 ms range or within 50% of ground-truth event's duration
if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
0.1, 0.5 * (
annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j]['event_onset'])):
NcorrOff += 1
break # In order to not evaluate duplicates
# Compute onset-only event-based metrics
eps = numpy.spacing(1)
results = {
'onset': {},
'onset-offset': {},
}
Nfp = Ntot - Ncorr
Nfn = Nref - Ncorr
Nsubs = min(Nfp, Nfn)
results['onset']['Rec'] = Ncorr / (Nref + eps)
results['onset']['Pre'] = Ncorr / (Ntot + eps)
results['onset']['F'] = 2 * (
(results['onset']['Pre'] * results['onset']['Rec']) / (
results['onset']['Pre'] + results['onset']['Rec'] + eps))
results['onset']['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
# Compute onset-offset event-based metrics
NfpOff = Ntot - NcorrOff
NfnOff = Nref - NcorrOff
NsubsOff = min(NfpOff, NfnOff)
results['onset-offset']['Rec'] = NcorrOff / (Nref + eps)
results['onset-offset']['Pre'] = NcorrOff / (Ntot + eps)
results['onset-offset']['F'] = 2 * ((results['onset-offset']['Pre'] * results['onset-offset']['Rec']) / (
results['onset-offset']['Pre'] + results['onset-offset']['Rec'] + eps))
results['onset-offset']['AEER'] = (NfnOff + NfpOff + NsubsOff) / (Nref + eps)
return results
def class_based(self, annotated_ground_truth, system_output):
# Class-wise event-based evaluation for event detection task
# outputFile: the output of the event detection system
# GTFile: the ground truth list of events
# Total number of detected and reference events per class
Ntot = numpy.zeros((len(self.class_list), 1))
for event in system_output:
pos = self.class_list.index(event['event_label'])
Ntot[pos] += 1
Nref = numpy.zeros((len(self.class_list), 1))
for event in annotated_ground_truth:
pos = self.class_list.index(event['event_label'])
Nref[pos] += 1
I = (Nref > 0).nonzero()[0] # index for classes present in ground-truth
# Number of correctly transcribed events per class, onset within a +/-100 ms range
Ncorr = numpy.zeros((len(self.class_list), 1))
NcorrOff = numpy.zeros((len(self.class_list), 1))
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
math.fabs(
annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
pos = self.class_list.index(system_output[i]['event_label'])
Ncorr[pos] += 1
# If offset within a +/-100 ms range or within 50% of ground-truth event's duration
if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
0.1, 0.5 * (
annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j][
'event_onset'])):
pos = self.class_list.index(system_output[i]['event_label'])
NcorrOff[pos] += 1
break # In order to not evaluate duplicates
# Compute onset-only class-wise event-based metrics
eps = numpy.spacing(1)
results = {
'onset': {},
'onset-offset': {},
}
Nfp = Ntot - Ncorr
Nfn = Nref - Ncorr
Nsubs = numpy.minimum(Nfp, Nfn)
tempRec = Ncorr[I] / (Nref[I] + eps)
tempPre = Ncorr[I] / (Ntot[I] + eps)
results['onset']['Rec'] = numpy.mean(tempRec)
results['onset']['Pre'] = numpy.mean(tempPre)
tempF = 2 * ((tempPre * tempRec) / (tempPre + tempRec + eps))
results['onset']['F'] = numpy.mean(tempF)
tempAEER = (Nfn[I] + Nfp[I] + Nsubs[I]) / (Nref[I] + eps)
results['onset']['AEER'] = numpy.mean(tempAEER)
# Compute onset-offset class-wise event-based metrics
NfpOff = Ntot - NcorrOff
NfnOff = Nref - NcorrOff
NsubsOff = numpy.minimum(NfpOff, NfnOff)
tempRecOff = NcorrOff[I] / (Nref[I] + eps)
tempPreOff = NcorrOff[I] / (Ntot[I] + eps)
results['onset-offset']['Rec'] = numpy.mean(tempRecOff)
results['onset-offset']['Pre'] = numpy.mean(tempPreOff)
tempFOff = 2 * ((tempPreOff * tempRecOff) / (tempPreOff + tempRecOff + eps))
results['onset-offset']['F'] = numpy.mean(tempFOff)
tempAEEROff = (NfnOff[I] + NfpOff[I] + NsubsOff[I]) / (Nref[I] + eps)
results['onset-offset']['AEER'] = numpy.mean(tempAEEROff)
return results
def main(argv):
# Examples to show usage and required data structures
class_list = ['class1', 'class2', 'class3']
system_output = [
{
'event_label': 'class1',
'event_onset': 0.1,
'event_offset': 1.0
},
{
'event_label': 'class2',
'event_onset': 4.1,
'event_offset': 4.7
},
{
'event_label': 'class3',
'event_onset': 5.5,
'event_offset': 6.7
}
]
annotated_groundtruth = [
{
'event_label': 'class1',
'event_onset': 0.1,
'event_offset': 1.0
},
{
'event_label': 'class2',
'event_onset': 4.2,
'event_offset': 5.4
},
{
'event_label': 'class3',
'event_onset': 5.5,
'event_offset': 6.7
}
]
dcase2013metric = DCASE2013_EventDetection_Metrics(class_list=class_list)
print 'DCASE2013'
print 'Frame-based:', dcase2013metric.frame_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
print 'Event-based:', dcase2013metric.event_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
print 'Class-based:', dcase2013metric.class_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
dcase2016_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=class_list)
print 'DCASE2016'
print dcase2016_metric.evaluate(system_output=system_output, annotated_ground_truth=annotated_groundtruth).results()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| mit |
acompa/leptoid | leptoid/graphite.py | 1 | 2954 | """ Defining class responsible for retrieving data from Graphite. """
import pandas
import pickle
from time import ctime
from urllib2 import urlopen
from string import join
from collections import defaultdict
import logging
LOG = logging.getLogger('graphite')
from leptoid.utils import parse_namespace_contents
SERIES_WINDOW_SIZE = 5
def build_graphite_call(targets, api_params):
    """ Construct /render API call to Graphite. API parameters are retrieved
    from YAML as a dict. Details on parameters at:
    http://graphite.readthedocs.org/en/0.9.10/render_api.html
    Parameters
    ----------
    targets
        list of strs, queries for Graphite. Can accept either a single
        namespace or a list of namespaces.
    Returns: str, the full /render API URL for the given targets.
    """
    # Wrap a single namespace in a list (list() would split a string into characters).
    if not isinstance(targets, list):
        targets = [targets]
    target_list = ["&target=%s" % target for target in targets]
    LOG.log(logging.DEBUG, "Building Graphite /render API call.")
    call = "https://graphite.knewton.net/render/?"
    for config in api_params.iteritems():
        call += '&%s' % '='.join(config)
    call += join(target_list, "")
    return call
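# A hypothetical usage sketch of build_graphite_call(); the namespace and the
# render parameters below are invented, only the URL layout matters here.
def _example_build_call():
    api_params = {'format': 'pickle', 'from': '-1hours'}
    call = build_graphite_call(['stats.gauges.example'], api_params)
    # Yields something like (parameter order follows dict iteration order):
    # https://graphite.knewton.net/render/?&format=pickle&from=-1hours&target=stats.gauges.example
    return call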
def call_graphite(targets, api_params):
"""
Construct /render API call and retrieves pickled response from Graphite.
	API params are retrieved from YAML as a dict. Details at:
http://graphite.readthedocs.org/en/0.9.10/render_api.html
Parameters
----------
targets
list of strs, target namespaces for Graphite query
Can accept either a single namespace or a list of namespaces.
Returns: pickled object sent by Graphite with namespace data.
"""
call = build_graphite_call(targets, api_params)
LOG.log(logging.INFO, "Calling Graphite with %s" % call)
response = urlopen(call)
return pickle.load(response)
def extract_time_series(graphite_data):
"""
Parses raw Graphite data into time series grouped by instance. All time
series are moving averages, with window size defined above.
Parameters
----------
graphite_data
pickled obj returned from Graphite's /render API.
Returns: dict with key=metric name, val=pandas.TimeSeries.
"""
namespace_data = dict([(env, defaultdict(dict))
for env in 'production', 'staging'])
for rawdata in graphite_data:
# Must convert epoch seconds to datetime string.
starttime = ctime(rawdata['start'])
# seriesidx provides timestamps for pandas.TimeSeries
seriesidx = pandas.PeriodIndex(
start=starttime, periods=len(rawdata['values']))
tser = pandas.TimeSeries(data=rawdata['values'], index=seriesidx)
tser = pandas.rolling_mean(tser, SERIES_WINDOW_SIZE, SERIES_WINDOW_SIZE)
tser = tser.fillna(0) # handle all nans
# Populate dict with pandas.TimeSeries for each service's instances.
# Finding instance in metric name by identifying substring with 'i-'.
env, service, instance_name = parse_namespace_contents(rawdata['name'])
namespace_data[env][service][instance_name] = tser
return namespace_data
| apache-2.0 |
kjung/scikit-learn | examples/applications/plot_out_of_core_classification.py | 32 | 13829 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
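# The helper below is a hedged, self-contained distillation of the out-of-core
# idea from the module docstring; the toy documents and labels are invented and
# are unrelated to the Reuters data used further down.
def _minimal_out_of_core_sketch():
    vec = HashingVectorizer(decode_error='ignore', n_features=2 ** 18)
    clf = SGDClassifier()
    batches = [(["good doc", "bad doc"], [1, 0]),
               (["another good one", "another bad one"], [1, 0])]
    for texts, labels in batches:
        # Hashing keeps the feature space fixed across batches, so partial_fit
        # can consume each mini-batch independently.
        X = vec.transform(texts)
        clf.partial_fit(X, labels, classes=[0, 1])
    return clf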
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
hennersz/pySpace | basemap/examples/make_inset.py | 4 | 1068 | from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
# Set up the primary map
fig = plt.figure()
ax = fig.add_subplot(111)
bmap =\
Basemap(projection='lcc',width=6000.e3,height=4000.e3,lon_0=-90,lat_0=40,resolution='l',ax=ax)
bmap.fillcontinents(color='coral', lake_color='aqua')
bmap.drawcountries()
bmap.drawstates()
bmap.drawmapboundary(fill_color='aqua')
bmap.drawcoastlines()
plt.title('map with an inset showing where the map is')
# axes for inset map.
axin = inset_axes(bmap.ax,width="30%",height="30%",loc=4)
# inset map is global, with primary map projection region drawn on it.
omap = Basemap(projection='ortho',lon_0=-105,lat_0=40,ax=axin,anchor='NE')
omap.drawcountries(color='white')
omap.fillcontinents(color='gray') #color = 'coral'
bx, by = omap(bmap.boundarylons, bmap.boundarylats)
xy = list(zip(bx,by))
mapboundary = Polygon(xy,edgecolor='red',linewidth=2,fill=False)
omap.ax.add_patch(mapboundary)
plt.show()
| gpl-3.0 |
h2educ/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
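# A hedged, illustrative check of the relationship documented above: the matrix
# returned by get_precision() should invert the one from get_covariance(). The
# PCA estimator (which exposes the same API) and the random data below are used
# purely for demonstration.
def _example_covariance_precision_check():
    from sklearn.decomposition import PCA
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    pca = PCA(n_components=3).fit(X)
    cov = pca.get_covariance()
    prec = pca.get_precision()
    # The product should be close to the identity matrix.
    return np.allclose(np.dot(cov, prec), np.eye(5))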
| bsd-3-clause |
flightgong/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
pearsonlab/thunder | thunder/extraction/block/methods/nmf.py | 8 | 3873 | from thunder.extraction.block.base import BlockMethod, BlockAlgorithm
from thunder.extraction.source import Source
class BlockNMF(BlockMethod):
def __init__(self, **kwargs):
algorithm = NMFBlockAlgorithm(**kwargs)
super(self.__class__, self).__init__(algorithm, **kwargs)
class NMFBlockAlgorithm(BlockAlgorithm):
"""
Find sources using non-negative matrix factorization on blocks.
NMF on each block provides a candidate set of basis functions.
These are then converted into regions using simple morphological operators:
labeling connected components, and removing all that fail to meet
min and max size thresholds.
Parameters
----------
maxIter : int, optional, default = 10
Maximum number of iterations
    componentsPerBlock : int, optional, default = 3
Number of components to find per block
"""
def __init__(self, maxIter=10, componentsPerBlock=3, percentile=75,
minArea=50, maxArea="block", medFilter=2, overlap=0.4, **extra):
self.maxIter = maxIter
self.componentsPerBlock = componentsPerBlock
self.percentile = percentile
self.minArea = minArea
self.maxArea = maxArea
self.medFilter = medFilter if medFilter is not None and medFilter > 0 else None
self.overlap = overlap if overlap is not None and overlap > 0 else None
def extract(self, block):
from numpy import clip, inf, percentile, asarray, where, size, prod, unique
from scipy.ndimage import median_filter
from sklearn.decomposition import NMF
from skimage.measure import label
from skimage.morphology import remove_small_objects
# get dimensions
n = self.componentsPerBlock
dims = block.shape[1:]
# handle maximum size
if self.maxArea == "block":
maxArea = prod(dims) / 2
else:
maxArea = self.maxArea
# reshape to be t x all spatial dimensions
data = block.reshape(block.shape[0], -1)
# build and apply NMF model to block
model = NMF(n, max_iter=self.maxIter)
model.fit(clip(data, 0, inf))
# reconstruct sources as spatial objects in one array
comps = model.components_.reshape((n,) + dims)
# convert from basis functions into shape
# by median filtering (optional), applying a threshold,
# finding connected components and removing small objects
combined = []
for c in comps:
tmp = c > percentile(c, self.percentile)
regions = remove_small_objects(label(tmp), min_size=self.minArea)
ids = unique(regions)
ids = ids[ids > 0]
for ii in ids:
r = regions == ii
if self.medFilter is not None:
r = median_filter(r, self.medFilter)
coords = asarray(where(r)).T
if (size(coords) > 0) and (size(coords) < maxArea):
combined.append(Source(coords))
# merge overlapping sources
if self.overlap is not None:
# iterate over source pairs and find a pair to merge
def merge(sources):
for i1, s1 in enumerate(sources):
for i2, s2 in enumerate(sources[i1+1:]):
if s1.overlap(s2) > self.overlap:
return i1, i1 + 1 + i2
return None
# merge pairs until none left to merge
pair = merge(combined)
testing = True
while testing:
if pair is None:
testing = False
else:
combined[pair[0]].merge(combined[pair[1]])
del combined[pair[1]]
pair = merge(combined)
return combined
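# A hedged usage sketch of the block algorithm above, run on random data; the
# block shape and the parameter values are invented, so any sources found are
# meaningless and only illustrate the call signature.
def _example_extract_on_random_block():
    from numpy.random import RandomState
    rng = RandomState(0)
    # A block of 20 time points over a 30 x 30 spatial patch.
    block = rng.rand(20, 30, 30)
    algorithm = NMFBlockAlgorithm(componentsPerBlock=2, minArea=10)
    return algorithm.extract(block)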
| apache-2.0 |
kalfasyan/DA224x | code/eigens/params100.py | 1 | 6863 | import random
import numpy as np
import itertools
import matplotlib.pylab as plt
import decimal
import math
nrn_type = "iaf_neuron"
exc_nrns_mc = 64
inh_nrns_mc = 16
lr_mc = 3
mc_hc = 4
hc = 3
nrns = (exc_nrns_mc+inh_nrns_mc)*hc*mc_hc*lr_mc
q = 1
sigma = math.sqrt(q/decimal.Decimal(nrns))
sigma2 = math.sqrt(1/decimal.Decimal(nrns))
mu = 0
nrns_hc = nrns/hc
nrns_mc = nrns_hc/mc_hc
nrns_l23 = nrns_mc*34/100
nrns_l4 = nrns_mc*33/100
nrns_l5 = nrns_mc*33/100
print nrns,"neurons."
print nrns_hc, "per hypercolumn in %s" %hc,"hypercolumns."
print nrns_mc, "per minicolumn in %s" %mc_hc,"minicolumns."
print nrns_l23, nrns_l4, nrns_l5, "in layers23, layers4 and layers5 respectively"
##############################################################
""" 2. Creating list of Hypercolumns, list of minicolumns within
hypercolumns, list of layers within minicolumns within
hypercolumns"""
split = [i for i in range(nrns)]
split_hc = zip(*[iter(split)]*nrns_hc)
split_mc = []
split_lr23,split_lr4,split_lr5 = [],[],[]
for i in range(len(split_hc)):
split_mc.append(zip(*[iter(split_hc[i])]*nrns_mc))
for j in range(len(split_mc[i])):
split_lr23.append(split_mc[i][j][0:nrns_l23])
split_lr4.append(split_mc[i][j][nrns_l23:nrns_l23+nrns_l4])
split_lr5.append(split_mc[i][j][nrns_l23+nrns_l4:])
split_exc,split_inh = [],[]
for i in range(len(split_lr23)):
split_exc.append(split_lr23[i][0:int(round(80./100.*(len(split_lr23[i]))))])
split_inh.append(split_lr23[i][int(round(80./100.*(len(split_lr23[i])))):])
for i in range(len(split_lr4)):
split_exc.append(split_lr4[i][0:int(round(80./100.*(len(split_lr4[i]))))])
split_inh.append(split_lr4[i][int(round(80./100.*(len(split_lr4[i])))):])
for i in range(len(split_lr5)):
split_exc.append(split_lr5[i][0:int(round(80./100.*(len(split_lr5[i]))))])
split_inh.append(split_lr5[i][int(round(80./100.*(len(split_lr5[i])))):])
##############################################################
""" 3. Creating sets for all minicolumns and all layers """
hypercolumns = set(split_hc)
minitemp = []
for i in range(len(split_mc)):
for j in split_mc[i]:
minitemp.append(j)
minicolumns = set(minitemp)
layers23 = set(list(itertools.chain.from_iterable(split_lr23)))
layers4 = set(list(itertools.chain.from_iterable(split_lr4)))
layers5 = set(list(itertools.chain.from_iterable(split_lr5)))
exc_nrns_set = set(list(itertools.chain.from_iterable(split_exc)))
inh_nrns_set = set(list(itertools.chain.from_iterable(split_inh)))
exc = [None for i in range(len(exc_nrns_set))]
inh = [None for i in range(len(inh_nrns_set))]
#################### FUNCTIONS #####################################
""" Checks if 2 neurons belong in the same hypercolumn """
def same_hypercolumn(q,w):
for i in hypercolumns:
if q in i and w in i:
return True
return False
""" Checks if 2 neurons belong in the same minicolumn """
def same_minicolumn(q,w):
for mc in minicolumns:
if q in mc and w in mc:
return True
return False
""" Checks if 2 neurons belong in the same layer """
def same_layer(q,w):
if same_hypercolumn(q,w):
if q in layers23 and w in layers23:
return True
elif q in layers4 and w in layers4:
return True
elif q in layers5 and w in layers5:
return True
return False
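""" Checks if neuron w lies in the hypercolumn immediately after the one containing neuron q """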
def next_hypercolumn(q,w):
if same_hypercolumn(q,w):
return False
for i in range(len(split_hc)):
for j in split_hc[i]:
if j < len(split_hc):
if (q in split_hc[i] and w in split_hc[i+1]):
return True
return False
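""" Checks if neuron w lies in the hypercolumn immediately before the one containing neuron q """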
def prev_hypercolumn(q,w):
if same_hypercolumn(q,w):
return False
for i in range(len(split_hc)):
for j in split_hc[i]:
if i >0:
if (q in split_hc[i] and w in split_hc[i-1]):
return True
return False
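""" Returns a weight for connections across hypercolumns: layer-5 sources
project to layer 4 of the next hypercolumn and to layer 2/3 of the previous
one, each with probability 0.20; otherwise returns 0 """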
def diff_hypercolumns(q,w):
if next_hypercolumn(q,w):
if (q in layers5 and w in layers4):
return flip(0.20,q)
elif prev_hypercolumn(q,w):
if (q in layers5 and w in layers23):
return flip(0.20,q)
return 0
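""" Checks if 2 neurons in the same layer are both excitatory """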
def both_exc(q,w):
if same_layer(q,w):
if (q in exc_nrns_set and w in exc_nrns_set):
return True
return False
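""" Checks if 2 neurons in the same layer are both inhibitory """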
def both_inh(q,w):
if same_layer(q,w):
if (q in inh_nrns_set and w in inh_nrns_set):
return True
return False
""" Returns 1 under probability 'p', else 0 (0<=p<=1)"""
def flipAdj(p,q):
if q in exc_nrns_set:
return 1 if random.random() < p else 0
elif q in inh_nrns_set:
return -1 if random.random() < p else 0
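""" Returns, under probability 'p', a Gaussian weight N(0,sigma) shifted by
+0.5 for an excitatory source q or by -0.5 for an inhibitory one, else 0 """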
def flip(p,q):
p+=.0
r=0# np.random.uniform(0,sigma)
if q in exc_nrns_set:
return (np.random.normal(0,sigma)+.5) if random.random() < p-r else 0
elif q in inh_nrns_set:
return (np.random.normal(0,sigma)-.5) if random.random() < p-r else 0
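""" Like flip, but draws the weight magnitude as |N(0,a)| with a fixed small
spread 'a'; positive for excitatory and negative for inhibitory sources """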
def flip2(p,q):
a = decimal.Decimal(0.002083333)
if q in exc_nrns_set:
return (abs(np.random.normal(0,a))) if random.random() < p else 0
elif q in inh_nrns_set:
return (-abs(np.random.normal(0,a))) if random.random() < p else 0
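""" Pushes the sum of a row 'z' of +1/0/-1 entries towards zero by clearing
entries of the over-represented sign """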
def check_zero(z):
unique, counts = np.unique(z, return_counts=True)
occurence = np.asarray((unique, counts)).T
for i in range(len(z)):
if np.sum(z) != 0:
if len(occurence)==3 and occurence[0][1]>occurence[2][1]:
if z[i] == -1:
z[i] = 0
elif len(occurence)==3 and occurence[2][1]>occurence[0][1]:
if z[i] == 1:
z[i] = 0
elif len(occurence) < 3:
if z[i] == -1:
z[i] += 1
if z[i] == 1:
z[i] -= 1
else:
return z
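""" Shifts the negative entries of 'l' so that positive and negative weights
sum to (approximately) zero """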
def balance(l):
N = len(l)
meanP, meanN = 0,0
c1, c2 = 0,0
for i in range(N):
if l[i] > 0:
meanP += l[i]
c1+=1
if l[i] < 0:
meanN += l[i]
c2+=1
diff = abs(meanP)-abs(meanN)
for i in range(N):
if l[i] < 0:
l[i] -= diff/(c2)
return l
""" Total sum of conn_matrix weights becomes zero """
def balanceN(mat):
N = len(mat)
sumP,sumN = 0,0
c,c2=0,0
for i in range(N):
for j in range(N):
if mat[j][i] > 0:
sumP += mat[j][i]
c+=1
elif mat[j][i] < 0:
sumN += mat[j][i]
c2+=1
diff = sumP + sumN
for i in range(N):
for j in range(N):
if mat[j][i] < 0:
mat[j][i] -= diff/c2
""" Returns a counter 'c' in case a number 'n' is not (close to) zero """
def check_count(c, n):
if n <= -1e-4 or n>= 1e-4:
c+=1
return c
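""" Returns the number of nonzero, positive and negative entries in row 'n' of 'mat' """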
def number_conns(mat,n):
zed,pos,neg=[],[],[]
for i in range(nrns):
zed.append(len(plt.find(np.abs(mat[i,:])) != 0))
pos.append(len(plt.find((mat[i,:]) > 0)))
neg.append(len(plt.find((mat[i,:]) < 0)))
#print pos[n]
return zed[n],pos[n],neg[n]
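""" Prints the layer (2/3, 4 or 5) and the excitatory/inhibitory identity of
neuron 'n', followed by its connection counts (all, excitatory, inhibitory) """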
def n_where(n,mat):
a = [layers23,layers4,layers5,exc_nrns_set,inh_nrns_set]
if n in a[0]:
print "in Layer23"
elif n in a[1]:
print "in Layer4"
elif n in a[2]:
print "in Layer5"
if n in a[3]:
print "Excitatory"
elif n in a[4]:
print "Inhibitory"
print "(All, Exc, Inh)"
print number_conns(mat,n)
#return "Done" | gpl-2.0 |
Titan-C/scikit-learn | examples/gaussian_process/plot_gpc_iris.py | 100 | 2269 | """
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for an isotropic
and anisotropic RBF kernel on a two-dimensional version of the iris dataset.
The anisotropic RBF kernel obtains a slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = np.array(iris.target, dtype=int)
h = .02 # step size in the mesh
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
# Plot the predicted probabilities. For that, we will assign a color to
# each point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 2, i + 1)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y],
edgecolors=(0, 0, 0))
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title("%s, LML: %.3f" %
(titles[i], clf.log_marginal_likelihood(clf.kernel_.theta)))
plt.tight_layout()
plt.show()
| bsd-3-clause |
Eric89GXL/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 8 | 4706 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n / 2]
Y_train = Y[:n / 2]
X_test = X[n / 2:]
Y_test = Y[n / 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
pl.figure(figsize=(12, 8))
pl.subplot(221)
pl.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
pl.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
pl.xlabel("x scores")
pl.ylabel("y scores")
pl.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
pl.xticks(())
pl.yticks(())
pl.legend(loc="best")
pl.subplot(224)
pl.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
pl.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
pl.xlabel("x scores")
pl.ylabel("y scores")
pl.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
pl.xticks(())
pl.yticks(())
pl.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
pl.subplot(222)
pl.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
pl.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
pl.xlabel("X comp. 1")
pl.ylabel("X comp. 2")
pl.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
pl.legend(loc="best")
pl.xticks(())
pl.yticks(())
pl.subplot(223)
pl.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
pl.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
pl.xlabel("Y comp. 1")
pl.ylabel("Y comp. 2")
pl.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
pl.legend(loc="best")
pl.xticks(())
pl.yticks(())
pl.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
wanggang3333/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 142 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Accuracies should be non-decreasing as n_candidates increases
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Accuracies should be non-decreasing as n_estimators increases
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether returned distances are less than `radius`.
# At least one point should be returned when `radius` is set to the
# mean distance from the query point to the other points in the
# dataset.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost collinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
# size of _fit_X = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
cainiaocome/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; nor does the average number
of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print()
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print()
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
pbrod/scipy | scipy/signal/spectral.py | 6 | 66649 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import _lombscargle
from ._arraytools import const_ext, even_ext, odd_ext, zero_ext
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram', 'stft', 'istft', 'check_COLA']
def lombscargle(x,
y,
freqs,
precenter=False,
normalize=False):
"""
lombscargle(x, y, freqs)
Computes the Lomb-Scargle periodogram.
The Lomb-Scargle periodogram was developed by Lomb [1]_ and further
extended by Scargle [2]_ to find, and test the significance of, weak
periodic signals with uneven temporal sampling.
When *normalize* is False (default) the computed periodogram
is unnormalized: it takes the value ``(A**2) * N/4`` for a harmonic
signal with amplitude A for sufficiently large N.
When *normalize* is True the computed periodogram is normalized by
the residuals of the data around a constant reference model (at zero).
Input arrays should be one-dimensional and will be cast to float64.
Parameters
----------
x : array_like
Sample times.
y : array_like
Measurement values.
freqs : array_like
Angular frequencies for output periodogram.
precenter : bool, optional
Pre-center amplitudes by subtracting the mean.
normalize : bool, optional
Compute normalized periodogram.
Returns
-------
pgram : array_like
Lomb-Scargle periodogram.
Raises
------
ValueError
If the input arrays `x` and `y` do not have the same shape.
Notes
-----
This subroutine calculates the periodogram using a slightly
modified algorithm due to Townsend [3]_ which allows the
periodogram to be calculated using only a single pass through
the input arrays for each frequency.
The running time of this subroutine scales roughly as O(len(x) * len(freqs)),
i.e. O(N^2) for a large number of samples and frequencies.
References
----------
.. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced
data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976
.. [2] J.D. Scargle "Studies in astronomical time series analysis. II -
Statistical aspects of spectral analysis of unevenly spaced data",
The Astrophysical Journal, vol 263, pp. 835-853, 1982
.. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle
periodogram using graphics processing units.", The Astrophysical
Journal Supplement Series, vol 191, pp. 247-253, 2010
Examples
--------
>>> import scipy.signal
>>> import matplotlib.pyplot as plt
First define some input parameters for the signal:
>>> A = 2.
>>> w = 1.
>>> phi = 0.5 * np.pi
>>> nin = 1000
>>> nout = 100000
>>> frac_points = 0.9 # Fraction of points to select
Randomly select a fraction of an array with timesteps:
>>> r = np.random.rand(nin)
>>> x = np.linspace(0.01, 10*np.pi, nin)
>>> x = x[r >= frac_points]
Plot a sine wave for the selected times:
>>> y = A * np.sin(w*x+phi)
Define the array of frequencies for which to compute the periodogram:
>>> f = np.linspace(0.01, 10, nout)
Calculate Lomb-Scargle periodogram:
>>> import scipy.signal as signal
>>> pgram = signal.lombscargle(x, y, f, normalize=True)
Now make a plot of the input data:
>>> plt.subplot(2, 1, 1)
>>> plt.plot(x, y, 'b+')
Then plot the normalized periodogram:
>>> plt.subplot(2, 1, 2)
>>> plt.plot(f, pgram)
>>> plt.show()
"""
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
freqs = np.asarray(freqs, dtype=np.float64)
assert x.ndim == 1
assert y.ndim == 1
assert freqs.ndim == 1
if precenter:
pgram = _lombscargle(x, y - y.mean(), freqs)
else:
pgram = _lombscargle(x, y, freqs)
if normalize:
pgram *= 2 / np.dot(y, y)
return pgram
def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to 'boxcar'.
nfft : int, optional
Length of the FFT used. If `None` the length of `x` will be
used.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0018156616014838548
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1):
r"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral
density by dividing the data into overlapping segments, computing a
modified periodogram for each segment and averaging the
periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method
[2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
r"""
Estimate the cross power spectral density, Pxy, using Welch's
method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap: int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and `fs` is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to
csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
-----
By convention, Pxy is computed with the conjugate FFT of X
multiplied by the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd'):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 8``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Sxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Sxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'.
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are
['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is
equivalent to the output of `stft` with no padding or boundary
extension. 'magnitude' returns the absolute magnitude of the
STFT. 'angle' and 'phase' return the complex angle of the STFT,
with and without unwrapping, respectively.
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds
to the segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to Welch's method, where the
entire data stream is averaged over, one may wish to use a smaller
overlap (or perhaps none at all) when computing a spectrogram, to
maintain some statistical independence between individual segments.
It is for this reason that the default window is a Tukey window with
1/8th of a window's length overlap at each end.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase']
if mode not in modelist:
raise ValueError('unknown value for mode {}, must be one of {}'
.format(mode, modelist))
# need to set default for nperseg before setting default for noverlap below
window, nperseg = _triage_segments(window, nperseg,
input_length=x.shape[axis])
# Less overlap than Welch's method, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
if mode == 'psd':
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='psd')
else:
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='stft')
if mode == 'magnitude':
Sxx = np.abs(Sxx)
elif mode in ['angle', 'phase']:
Sxx = np.angle(Sxx)
if mode == 'phase':
# Sxx has one additional dimension for time strides
if axis < 0:
axis -= 1
Sxx = np.unwrap(Sxx, axis=axis)
# mode =='complex' is same as `stft`, doesn't need modification
return freqs, time, Sxx
def check_COLA(window, nperseg, noverlap, tol=1e-10):
r"""
Check whether the Constant OverLap Add (COLA) constraint is met
Parameters
----------
window : str or tuple or array_like
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
nperseg : int
Length of each segment.
noverlap : int
Number of points to overlap between segments.
tol : float, optional
The allowed variance of a bin's weighted sum from the median bin
sum.
Returns
-------
verdict : bool
`True` if chosen combination satisfies COLA within `tol`,
`False` otherwise
See Also
--------
stft: Short Time Fourier Transform
istft: Inverse Short Time Fourier Transform
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA). This ensures that every point in the input data
is equally weighted, thereby avoiding aliasing and allowing full
reconstruction.
Some examples of windows that satisfy COLA:
- Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ...
- Bartlett window at overlap of 1/2, 3/4, 5/6, ...
- Hann window at 1/2, 2/3, 3/4, ...
- Any Blackman family window at 2/3 overlap
- Any window with ``noverlap = nperseg-1``
A very comprehensive list of other windows may be found in [2]_,
wherein the COLA condition is satisfied when the "Amplitude
Flatness" is unity.
.. versionadded:: 0.19.0
References
----------
.. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
Publishing, 2011, ISBN 978-0-9745607-3-1.
.. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
spectral density estimation by the Discrete Fourier transform
(DFT), including a comprehensive list of window functions and
some new at-top windows", 2002,
http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
Examples
--------
>>> from scipy import signal
Confirm COLA condition for rectangular window of 75% (3/4) overlap:
>>> signal.check_COLA(signal.boxcar(100), 100, 75)
True
COLA is not true for 25% (1/4) overlap, though:
>>> signal.check_COLA(signal.boxcar(100), 100, 25)
False
"Symmetrical" Hann window (for filter design) is not COLA:
>>> signal.check_COLA(signal.hann(120, sym=True), 120, 60)
False
"Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for
overlap of 1/2, 2/3, 3/4, etc.:
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 60)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 80)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 90)
True
"""
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
noverlap = int(noverlap)
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
step = nperseg - noverlap
binsums = np.sum((win[ii*step:(ii+1)*step] for ii in range(nperseg//step)),
axis=0)
if nperseg % step != 0:
binsums[:nperseg % step] += win[-(nperseg % step):]
deviation = binsums - np.median(binsums)
return np.max(np.abs(deviation)) < tol
def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
detrend=False, return_onesided=True, boundary='zeros', padded=True,
axis=-1):
r"""
Compute the Short Time Fourier Transform (STFT).
STFTs can be used as a way of quantifying the change of a
nonstationary signal's frequency and phase content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`. When
specified, the COLA constraint must be met (see Notes below).
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to `False`.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned. Defaults to
`True`.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is
extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `True`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`, as is the
default.
axis : int, optional
Axis along which the STFT is computed; the default is over the
last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Zxx : ndarray
STFT of `x`. By default, the last axis of `Zxx` corresponds
to the segment times.
See Also
--------
istft: Inverse Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
welch: Power spectral density by Welch's method.
spectrogram: Spectrogram by Welch's method.
csd: Cross spectral density by Welch's method.
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA), and the input signal must have complete
windowing coverage (i.e. ``(x.shape[axis] - nperseg) %
(nperseg-noverlap) == 0``). The `padded` argument may be used to
accomplish this.
The COLA constraint ensures that every point in the input data is
equally weighted, thereby avoiding aliasing and allowing full
reconstruction. Whether a choice of `window`, `nperseg`, and
`noverlap` satisfy this constraint can be tested with
`check_COLA`.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
    .. [2] Daniel W. Griffin, Jae S. Lim, "Signal Estimation from
           Modified Short-Time Fourier Transform", IEEE Trans. ASSP, 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the STFT's magnitude.
>>> f, t, Zxx = signal.stft(x, fs, nperseg=1000)
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided,
scaling='spectrum', axis=axis,
mode='stft', boundary=boundary,
padded=padded)
return freqs, time, Zxx
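# A minimal illustrative sketch (not part of the original scipy source): an
# STFT -> ISTFT round trip with the default Hann window, which satisfies
# COLA at the default 50% overlap, so the reconstruction should match the
# input to within numerical precision.  Signal length and nperseg are
# arbitrary example values.
def _demo_stft_roundtrip():
    x = np.random.randn(2048)
    _, _, Zxx = stft(x, fs=1.0, nperseg=128)
    _, xrec = istft(Zxx, fs=1.0, nperseg=128)
    return np.max(np.abs(x - xrec[:x.size]))  # expected to be ~1e-15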
def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2):
r"""
Perform the inverse Short Time Fourier transform (iSTFT).
Parameters
----------
Zxx : array_like
STFT of the signal to be reconstructed. If a purely real array
is passed, it will be cast to a complex data type.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window. Must match the window used to generate the
STFT for faithful inversion.
nperseg : int, optional
Number of data points corresponding to each STFT segment. This
parameter must be specified if the number of data points per
segment is odd, or if the STFT was padded via ``nfft >
nperseg``. If `None`, the value depends on the shape of
`Zxx` and `input_onesided`. If `input_onesided` is True,
``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise,
``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`.
noverlap : int, optional
Number of points to overlap between segments. If `None`, half
of the segment length. Defaults to `None`. When specified, the
COLA constraint must be met (see Notes below), and should match
the parameter used to generate the STFT. Defaults to `None`.
nfft : int, optional
Number of FFT points corresponding to each STFT segment. This
parameter must be specified if the STFT was padded via ``nfft >
nperseg``. If `None`, the default values are the same as for
`nperseg`, detailed above, with one exception: if
`input_onesided` is True and
``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on
that value. This case allows the proper inversion of an
odd-length unpadded STFT using ``nfft=None``. Defaults to
`None`.
input_onesided : bool, optional
If `True`, interpret the input array as one-sided FFTs, such
as is returned by `stft` with ``return_onesided=True`` and
        `numpy.fft.rfft`. If `False`, interpret the input as a
two-sided FFT. Defaults to `True`.
boundary : bool, optional
Specifies whether the input signal was extended at its
boundaries by supplying a non-`None` ``boundary`` argument to
`stft`. Defaults to `True`.
time_axis : int, optional
Where the time segments of the STFT is located; the default is
the last axis (i.e. ``axis=-1``).
freq_axis : int, optional
Where the frequency axis of the STFT is located; the default is
the penultimate axis (i.e. ``axis=-2``).
Returns
-------
t : ndarray
Array of output data times.
x : ndarray
iSTFT of `Zxx`.
See Also
--------
stft: Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
Notes
-----
In order to enable inversion of an STFT via the inverse STFT with
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA). This ensures that every point in the input data
is equally weighted, thereby avoiding aliasing and allowing full
reconstruction. Whether a choice of `window`, `nperseg`, and
`noverlap` satisfy this constraint can be tested with
`check_COLA`, by using ``nperseg = Zxx.shape[freq_axis]``.
An STFT which has been modified (via masking or otherwise) is not
    guaranteed to correspond to an exactly realizable signal. This
    function implements the iSTFT via the least-squares estimation
algorithm detailed in [2]_, which produces a signal that minimizes
the mean squared error between the STFT of the returned signal and
the modified STFT.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
    .. [2] Daniel W. Griffin, Jae S. Lim, "Signal Estimation from
           Modified Short-Time Fourier Transform", IEEE Trans. ASSP, 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by
0.001 V**2/Hz of white noise sampled at 1024 Hz.
>>> fs = 1024
>>> N = 10*fs
>>> nperseg = 512
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> carrier = amp * np.sin(2*np.pi*50*time)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> x = carrier + noise
Compute the STFT, and plot its magnitude
>>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg)
>>> plt.figure()
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.ylim([f[1], f[-1]])
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.yscale('log')
>>> plt.show()
Zero the components that are 10% or less of the carrier magnitude,
then convert back to a time series via inverse STFT
>>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0)
>>> _, xrec = signal.istft(Zxx, fs)
Compare the cleaned signal with the original and true carrier signals.
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([2, 2.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
Note that the cleaned signal does not start as abruptly as the original,
since some of the coefficients of the transient were also removed:
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([0, 0.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
"""
# Make sure input is an ndarray of appropriate complex dtype
Zxx = np.asarray(Zxx) + 0j
freq_axis = int(freq_axis)
time_axis = int(time_axis)
if Zxx.ndim < 2:
raise ValueError('Input stft must be at least 2d!')
if freq_axis == time_axis:
raise ValueError('Must specify differing time and frequency axes!')
nseg = Zxx.shape[time_axis]
if input_onesided:
# Assume even segment length
n_default = 2*(Zxx.shape[freq_axis] - 1)
else:
n_default = Zxx.shape[freq_axis]
# Check windowing parameters
if nperseg is None:
nperseg = n_default
else:
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
if (input_onesided) and (nperseg == n_default + 1):
# Odd nperseg, no FFT padding
nfft = nperseg
else:
nfft = n_default
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
if not check_COLA(window, nperseg, noverlap):
raise ValueError('Window, STFT shape and noverlap do not satisfy the '
'COLA constraint.')
    # Rearrange axes if necessary
if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2:
# Turn negative indices to positive for the call to transpose
if freq_axis < 0:
freq_axis = Zxx.ndim + freq_axis
if time_axis < 0:
            time_axis = Zxx.ndim + time_axis
zouter = list(range(Zxx.ndim))
for ax in sorted([time_axis, freq_axis], reverse=True):
zouter.pop(ax)
Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis])
# Get window as array
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of {0}'.format(nperseg))
if input_onesided:
ifunc = np.fft.irfft
else:
ifunc = fftpack.ifft
xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :]
# Initialize output and normalization arrays
outputlength = nperseg + (nseg-1)*nstep
x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype)
norm = np.zeros(outputlength, dtype=xsubs.dtype)
if np.result_type(win, xsubs) != xsubs.dtype:
win = win.astype(xsubs.dtype)
xsubs *= win.sum() # This takes care of the 'spectrum' scaling
# Construct the output from the ifft segments
# This loop could perhaps be vectorized/strided somehow...
for ii in range(nseg):
# Window the ifft
x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win
norm[..., ii*nstep:ii*nstep+nperseg] += win**2
# Divide out normalization where non-tiny
x /= np.where(norm > 1e-10, norm, 1.0)
# Remove extension points
if boundary:
x = x[..., nperseg//2:-(nperseg//2)]
if input_onesided:
x = x.real
# Put axes back
if x.ndim > 1:
if time_axis != Zxx.ndim-1:
if freq_axis < time_axis:
time_axis -= 1
x = np.rollaxis(x, -1, time_axis)
time = np.arange(x.shape[0])/float(fs)
return time, x
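# A minimal illustrative sketch (not part of the original scipy source):
# when `nperseg` is omitted, `istft` infers it from the frequency axis of a
# one-sided STFT as 2 * (n_freq_bins - 1), so 129 bins map back to
# nperseg = 256.  The input length of 4096 is an arbitrary example value.
def _demo_istft_nperseg_inference():
    x = np.random.randn(4096)
    _, _, Zxx = stft(x, nperseg=256)
    _, xrec = istft(Zxx)  # nperseg inferred from Zxx.shape[-2]
    return Zxx.shape[-2], xrec.shape[-1]  # expected: (129, 4096)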
def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', axis=-1):
r"""
Estimate the magnitude squared coherence estimate, Cxy, of
discrete-time signals X and Y using Welch's method.
``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
spectral density estimates of X and Y, and `Pxy` is the cross
spectral density estimate of X and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap: int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
    -----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of
Signals" Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
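# A minimal illustrative sketch (not part of the original scipy source): the
# magnitude squared coherence of a signal with itself is 1 at every
# frequency, which makes a convenient sanity check for the estimator above.
def _demo_coherence_self():
    x = np.random.randn(4096)
    _, Cxy = coherence(x, x, fs=1.0, nperseg=256)
    return np.allclose(Cxy, 1.0)  # expected: True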
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='spectrum', axis=-1, mode='psd', boundary=None,
padded=False):
"""
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between
the stft, psd, csd, and spectrogram functions. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Parameters
    ----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
the same object in memory as `x` (i.e. ``_spectral_helper(x,
x, ...)``), the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross
spectrum ('spectrum') where `Pxy` has units of V**2, if `x`
and `y` are measured in V and `fs` is measured in Hz.
Defaults to 'density'
axis : int, optional
Axis along which the FFTs are computed; the default is over the
last axis (i.e. ``axis=-1``).
mode: str {'psd', 'stft'}, optional
Defines what kind of return values are expected. Defaults to
'psd'.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
`None`.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `False`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`.
Returns
-------
freqs : ndarray
Array of sample frequencies.
t : ndarray
Array of times corresponding to each data segment
result : ndarray
        Array of output data, contents dependent on *mode* kwarg.
References
----------
.. [1] Stack Overflow, "Rolling window for 1D arrays in Numpy?",
http://stackoverflow.com/a/6811241
.. [2] Stack Overflow, "Using strides for an efficient moving
average filter", http://stackoverflow.com/a/4947453
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
if mode not in ['psd', 'stft']:
raise ValueError("Unknown value for mode %s, must be one of: "
"{'psd', 'stft'}" % mode)
boundary_funcs = {'even': even_ext,
'odd': odd_ext,
'constant': const_ext,
'zeros': zero_ext,
None: None}
if boundary not in boundary_funcs:
raise ValueError("Unknown boundary option '{0}', must be one of: {1}"
.format(boundary, list(boundary_funcs.keys())))
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is 'stft'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x, y, np.complex64)
else:
outdtype = np.result_type(x, np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
    # Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
if nperseg is not None: # if specified by user
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
# parse window; if array like, then set nperseg = win.shape
    win, nperseg = _triage_segments(window, nperseg,
                                    input_length=x.shape[-1])
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
# Padding occurs after boundary extension, so that the extended signal ends
# in zeros, instead of introducing an impulse at the end.
# I.e. if x = [..., 3, 2]
# extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
# pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]
if boundary is not None:
ext_func = boundary_funcs[boundary]
x = ext_func(x, nperseg//2, axis=-1)
if not same_data:
y = ext_func(y, nperseg//2, axis=-1)
if padded:
# Pad to integer number of windowed segments
# I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
zeros_shape = list(x.shape[:-1]) + [nadd]
x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
if not same_data:
zeros_shape = list(y.shape[:-1]) + [nadd]
y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
    if np.result_type(win, np.complex64) != outdtype:
win = win.astype(outdtype)
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if mode == 'stft':
scale = np.sqrt(scale)
if return_onesided:
if np.iscomplexobj(x):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'twosided'
if sides == 'twosided':
freqs = fftpack.fftfreq(nfft, 1/fs)
elif sides == 'onesided':
freqs = np.fft.rfftfreq(nfft, 1/fs)
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
sides)
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
result *= scale
if sides == 'onesided' and mode == 'psd':
if nfft % 2:
result[..., 1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[..., 1:-1] *= 2
time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
nperseg - noverlap)/float(fs)
if boundary is not None:
time -= (nperseg/2) / fs
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'stft':
result = result.real
# Output is going to have new last axis for window index
if axis != -1:
# Specify as positive axis index
if axis < 0:
axis = len(result.shape)-1-axis
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
else:
# Make sure window/time index is last axis
result = np.rollaxis(result, -1, -2)
return freqs, time, result
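# A minimal illustrative sketch (not part of the original scipy source): it
# assumes that averaging the per-segment PSD output of `_spectral_helper`
# over the last (segment) axis reproduces the Welch estimate, which is what
# `welch`/`csd` do on top of this helper.  Parameters are arbitrary but
# chosen to match welch's defaults (Hann window, 50% overlap).
def _demo_spectral_helper_vs_welch():
    x = np.random.randn(1024)
    freqs, _, Pxx_segs = _spectral_helper(x, x, fs=1.0, nperseg=256,
                                          scaling='density', mode='psd')
    f_w, Pxx = welch(x, fs=1.0, nperseg=256)
    return np.allclose(freqs, f_w) and np.allclose(Pxx_segs.mean(axis=-1), Pxx)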
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
"""
Calculate windowed FFT, for internal use by
scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
    `_spectral_helper`. All input validation is performed there, and the
data axis is assumed to be the last axis of x. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Returns
-------
result : ndarray
Array of FFT data
References
----------
.. [1] Stack Overflow, "Repeat NumPy array without replicating
data?", http://stackoverflow.com/a/5568169
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
    # Create strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
if sides == 'twosided':
func = fftpack.fft
else:
result = result.real
func = np.fft.rfft
result = func(result, n=nfft)
return result
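# A minimal illustrative sketch (not part of the original scipy source): the
# stride trick used in `_fft_helper` is equivalent to stacking explicit
# overlapping slices; this spells that out for a small toy vector.
def _demo_strided_segments():
    x = np.arange(10.0)
    nperseg, noverlap = 4, 2
    step = nperseg - noverlap
    n_segs = (x.shape[-1] - noverlap) // step
    shape = x.shape[:-1] + (n_segs, nperseg)
    strides = x.strides[:-1] + (step * x.strides[-1], x.strides[-1])
    segs = np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
    explicit = np.array([x[ii * step:ii * step + nperseg]
                         for ii in range(n_segs)])
    return np.array_equal(segs, explicit)  # expected: True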
def _triage_segments(window, nperseg,input_length):
"""
Parses window and nperseg arguments for spectrogram and _spectral_helper.
This is a helper function, not meant to be called externally.
Parameters
    ----------
window : string, tuple, or ndarray
If window is specified by a string or tuple and nperseg is not
specified, nperseg is set to the default of 256 and returns a window of
that length.
If instead the window is array_like and nperseg is not specified, then
nperseg is set to the length of the window. A ValueError is raised if
the user supplies both an array_like window and a value for nperseg but
nperseg does not equal the length of the window.
nperseg : int
Length of each segment
input_length: int
Length of input signal, i.e. x.shape[-1]. Used to test for errors.
Returns
-------
win : ndarray
        window. If the function was called with a string or tuple, this holds
the actual array used as a window.
nperseg : int
Length of each segment. If window is str or tuple, nperseg is set to
256. If window is array_like, nperseg is set to the length of the
window.
"""
#parse window; if array like, then set nperseg = win.shape
if isinstance(window, string_types) or isinstance(window, tuple):
# if nperseg not specified
if nperseg is None:
nperseg = 256 # then change to default
if nperseg > input_length:
warnings.warn('nperseg = {0:d} is greater than input length '
' = {1:d}, using nperseg = {1:d}'
.format(nperseg, input_length))
nperseg = input_length
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if input_length < win.shape[-1]:
raise ValueError('window is longer than input signal')
if nperseg is None:
nperseg = win.shape[0]
elif nperseg is not None:
if nperseg != win.shape[0]:
raise ValueError("value specified for nperseg is different from"
" length of window")
return win, nperseg
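# A minimal illustrative sketch (not part of the original scipy source):
# with a string window and no nperseg, `_triage_segments` falls back to the
# default of 256 but caps it at the input length (emitting a warning).
def _demo_triage_segments():
    win, nperseg = _triage_segments('hann', None, input_length=100)
    return win.shape[0], nperseg  # expected: (100, 100)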
| bsd-3-clause |
ina-foss/ID-Fits | lib/unstable/learning/mahalanobis_metric.py | 1 | 3166 | # ID-Fits
# Copyright (c) 2015 Institut National de l'Audiovisuel, INA, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import sys
import random
import numpy as np
from sklearn.linear_model import SGDClassifier
class DiagonalMahalanobisMetric:
def __init__(self, W=None):
self.W_ = W
def fit(self, X, y, n_samples=-1, n_iter=5):
if n_samples <= 0:
n_samples = X.shape[0] * (X.shape[0]-1) / 2
random_sampling = False
else:
random_sampling = True
n_features = X.shape[1]
X2 = np.empty((n_samples, n_features), dtype=np.float64)
y2 = np.empty((n_samples), dtype=np.int8)
print "Creating X and y vectors..."
sys.stdout.flush()
# Keep every samples
if not random_sampling:
n = X.shape[0]
index = 0
for i in range(n):
for j in range(i+1, n):
X2[index] = (X[i] - X[j]) ** 2
if y[i] == y[j]:
y2[index] = -1
else:
y2[index] = 1
index += 1
# Keep subset of original samples
else:
labels = list(set(y))
Xk = [X[y == label] for label in labels]
for i in range(n_samples):
coin = int(2*random.random())
if coin == 0:
y_ = -1
found = False
while not found:
samples = random.choice(Xk)
found = samples.shape[0] >= 2
x1, x2 = random.sample(samples, 2)
else:
y_ = 1
class1, class2 = random.sample(Xk, 2)
x1 = random.choice(class1)
x2 = random.choice(class2)
X2[i] = (x1 - x2) ** 2
y2[i] = y_
print "Performing SGD..."
sys.stdout.flush()
svm = SGDClassifier(loss='hinge', penalty='l2', shuffle=True, class_weight='auto', alpha=0.01, n_iter=n_iter)
svm.fit(X2, y2)
print "Finished with score: %f" % svm.score(X2, y2)
self.W_ = svm.coef_[0]
self.b_ = svm.intercept_[0]
def mesureDistance(self, x1, x2):
delta = (x1 - x2) ** 2
return np.inner(self.W_, delta)
"""
def transform(self, X):
return np.dot(X, np.diag(self.W_))
"""
| lgpl-3.0 |
paulorauber/rlnn | model/rl_cm.py | 1 | 5666 | from collections import deque
import numpy as np
from sklearn.utils import check_random_state
def ffnn_control_update(env, ffnn, lstm, episode_buffer, gamma,
control_updates_per_transition, verbose=0,
random_state=None):
episodes = random_state.permutation(len(episode_buffer))
episodes = [i for i in episodes if len(episode_buffer[i]) > 1]
episodes = episodes[0: control_updates_per_transition]
Xb, Yb, mask = [], [], []
for i in episodes:
X = np.array(episode_buffer[i][0: -1])
T = len(X)
states = []
prev_h_a = np.zeros(lstm.n_units[1])
prev_h_s = np.zeros(lstm.n_units[1])
for t in range(T):
state = lstm.forward_pass(X[t], prev_h_a, prev_h_s)
prev_h_a = state['activation_output']
prev_h_s = state['activation_cell']
states.append(state)
for t in range(T):
r = X[t, 0]
s = X[t, 1: env.d_states + 1]
action_code = X[t, env.d_states + 1:]
if t == 0:
prev_h_a = np.zeros(lstm.n_units[1])
prev_h_s = np.zeros(lstm.n_units[1])
else:
prev_h_a = states[t - 1]['activation_output']
prev_h_s = states[t - 1]['activation_cell']
x = np.concatenate([[r], s, prev_h_a, prev_h_s])
next_rs = X[t + 1] if t < T - 1 else episode_buffer[i][-1]
next_r = next_rs[0]
next_s = next_rs[1: env.d_states + 1]
next_action_code = next_rs[env.d_states + 1:]
if np.allclose(next_action_code, 0):
v = next_r
else:
curr_h_a = states[t]['activation_output']
curr_h_s = states[t]['activation_cell']
xprime = np.concatenate([[next_r], next_s, curr_h_a, curr_h_s])
v = next_r + gamma*np.max(ffnn.predict(xprime))
Xb.append(x)
Yb.append(action_code*v)
mask.append(action_code)
Xb, Yb, mask = np.array(Xb), np.array(Yb), np.array(mask)
ffnn.fit_batch(Xb, Yb, mask)
if len(episodes) > 0 and verbose > 1:
error = np.linalg.norm(Yb - ffnn.predict_batch(Xb)*mask)/len(episodes)
print('Controller error: {0}.'.format(error))
def lstm_model_update(env, lstm, episode_buffer, model_updates_per_episode,
verbose=0, random_state=None):
episodes = random_state.permutation(len(episode_buffer))
episodes = episodes[0: model_updates_per_episode]
if verbose > 0:
error = 0.0
for i in episodes:
X = np.array(episode_buffer[i][0: -1])
T = len(X)
Y = np.zeros((T, 1 + env.d_states))
for t in range(T - 1):
Y[t] = X[t + 1, 0: 1 + env.d_states]
Y[T - 1] = episode_buffer[i][-1][0: 1 + env.d_states]
lstm.fit(X, Y, np.ones(Y.shape))
if verbose > 0:
error += np.linalg.norm(Y - lstm.predict(X))
if len(episodes) > 0 and verbose > 0:
print('Model error: {0}.'.format(error / len(episodes)))
def q_cm(env, ffnn, lstm, n_episodes=1024, gamma=0.98, min_epsilon=0.1,
max_epsilon=0.5, decay_epsilon=0.99, max_queue=256,
model_updates_per_episode=32, control_updates_per_transition=2,
verbose=1, random_state=None):
random_state = check_random_state(random_state)
episode_buffer = deque(maxlen=max_queue)
if verbose > 0:
episode_return = 0.0
episode_gamma = 1.0
epsilon = max(min_epsilon, max_epsilon)
for episode in range(n_episodes):
if verbose > 0:
print('Episode {0}.'.format(episode + 1))
lstm_model_update(env, lstm, episode_buffer, model_updates_per_episode,
verbose=verbose, random_state=random_state)
episode_buffer.append([])
r = 0.0
s = env.start()
prev_h_a = np.zeros(lstm.n_units[1])
prev_h_s = np.zeros(lstm.n_units[1])
if verbose > 1:
step = 0
print('Step {0}.'.format(step + 1))
if verbose > 2:
print(env)
while not env.ended():
if random_state.uniform(0, 1) < epsilon:
a = random_state.choice(env.n_actions)
else:
x = np.concatenate([[r], s, prev_h_a, prev_h_s])
a = np.argmax(ffnn.predict(x))
action_code = np.zeros(env.n_actions)
action_code[a] = 1
x = np.concatenate([[r], s, action_code])
episode_buffer[-1].append(x)
lstm_state = lstm.forward_pass(x, prev_h_a, prev_h_s)
prev_h_a = lstm_state['activation_output']
prev_h_s = lstm_state['activation_cell']
s, r = env.next_state_reward(a)
ffnn_control_update(env, ffnn, lstm, episode_buffer, gamma,
control_updates_per_transition,
verbose=verbose, random_state=random_state)
if verbose > 0:
episode_return += episode_gamma*r
episode_gamma *= gamma
if verbose > 1:
step += 1
print('Step {0}.'.format(step + 1))
if verbose > 2:
print(env)
x = np.concatenate([[r], s, np.zeros(env.n_actions)])
episode_buffer[-1].append(x)
epsilon = max(min_epsilon, epsilon*decay_epsilon)
if verbose > 0:
print('Return: {0}.'.format(episode_return))
episode_return = 0.0
episode_gamma = 1.0
return ffnn, lstm
| mit |
alorenzo175/pvlib-python | pvlib/iotools/surfrad.py | 1 | 7027 | """
Import functions for NOAA SURFRAD Data.
"""
import io
from urllib.request import urlopen, Request
import pandas as pd
import numpy as np
SURFRAD_COLUMNS = [
'year', 'jday', 'month', 'day', 'hour', 'minute', 'dt', 'zen',
'dw_solar', 'dw_solar_flag', 'uw_solar', 'uw_solar_flag', 'direct_n',
'direct_n_flag', 'diffuse', 'diffuse_flag', 'dw_ir', 'dw_ir_flag',
'dw_casetemp', 'dw_casetemp_flag', 'dw_dometemp', 'dw_dometemp_flag',
'uw_ir', 'uw_ir_flag', 'uw_casetemp', 'uw_casetemp_flag', 'uw_dometemp',
'uw_dometemp_flag', 'uvb', 'uvb_flag', 'par', 'par_flag', 'netsolar',
'netsolar_flag', 'netir', 'netir_flag', 'totalnet', 'totalnet_flag',
'temp', 'temp_flag', 'rh', 'rh_flag', 'windspd', 'windspd_flag',
'winddir', 'winddir_flag', 'pressure', 'pressure_flag']
# Dictionary mapping surfrad variables to pvlib names
VARIABLE_MAP = {
'zen': 'solar_zenith',
'dw_solar': 'ghi',
'dw_solar_flag': 'ghi_flag',
'direct_n': 'dni',
'direct_n_flag': 'dni_flag',
'diffuse': 'dhi',
'diffuse_flag': 'dhi_flag',
'temp': 'temp_air',
'temp_flag': 'temp_air_flag',
'windspd': 'wind_speed',
'windspd_flag': 'wind_speed_flag',
'winddir': 'wind_direction',
'winddir_flag': 'wind_direction_flag',
'rh': 'relative_humidity',
'rh_flag': 'relative_humidity_flag'
}
def read_surfrad(filename, map_variables=True):
"""Read in a daily NOAA SURFRAD[1] file.
Parameters
----------
filename: str
Filepath or url.
map_variables: bool
When true, renames columns of the Dataframe to pvlib variable names
where applicable. See variable SURFRAD_COLUMNS.
Returns
-------
Tuple of the form (data, metadata).
data: Dataframe
Dataframe with the fields found below.
metadata: dict
Site metadata included in the file.
Notes
-----
Metadata dictionary includes the following fields:
=============== ====== ===============
Key Format Description
=============== ====== ===============
station String site name
latitude Float site latitude
longitude Float site longitude
elevation Int site elevation
surfrad_version Int surfrad version
tz String Timezone (UTC)
=============== ====== ===============
Dataframe includes the following fields:
======================= ====== ==========================================
raw, mapped Format Description
======================= ====== ==========================================
**Mapped field names are returned when the map_variables argument is True**
---------------------------------------------------------------------------
year int year as 4 digit int
jday int day of year 1-365(or 366)
month int month (1-12)
day int day of month(1-31)
hour int hour (0-23)
minute int minute (0-59)
dt float decimal time i.e. 23.5 = 2330
zen, solar_zenith float solar zenith angle (deg)
**Fields below have associated qc flags labeled <field>_flag.**
---------------------------------------------------------------------------
dw_solar, ghi float downwelling global solar(W/m^2)
    uw_solar                float  upwelling global solar(W/m^2)
direct_n, dni float direct normal solar (W/m^2)
diffuse, dhi float downwelling diffuse solar (W/m^2)
dw_ir float downwelling thermal infrared (W/m^2)
dw_casetemp float downwelling IR case temp (K)
dw_dometemp float downwelling IR dome temp (K)
uw_ir float upwelling thermal infrared (W/m^2)
uw_casetemp float upwelling IR case temp (K)
    uw_dometemp             float  upwelling IR dome temp (K)
    uvb                     float  global uvb (milliWatts/m^2)
par float photosynthetically active radiation(W/m^2)
netsolar float net solar (dw_solar - uw_solar) (W/m^2)
netir float net infrared (dw_ir - uw_ir) (W/m^2)
totalnet float net radiation (netsolar+netir) (W/m^2)
    temp, temp_air          float  10-meter air temperature (deg C)
rh, relative_humidity float relative humidity (%)
windspd, wind_speed float wind speed (m/s)
winddir, wind_direction float wind direction (deg, clockwise from north)
pressure float station pressure (mb)
======================= ====== ==========================================
See README files located in the station directories in the SURFRAD
data archives[2] for details on SURFRAD daily data files.
References
----------
[1] NOAA Earth System Research Laboratory Surface Radiation Budget Network
`SURFRAD Homepage <https://www.esrl.noaa.gov/gmd/grad/surfrad/>`_
[2] NOAA SURFRAD Data Archive
`SURFRAD Archive <ftp://aftp.cmdl.noaa.gov/data/radiation/surfrad/>`_
"""
if filename.startswith('ftp'):
req = Request(filename)
response = urlopen(req)
file_buffer = io.StringIO(response.read().decode(errors='ignore'))
else:
file_buffer = open(filename, 'r')
# Read and parse the first two lines to build the metadata dict.
station = file_buffer.readline()
file_metadata = file_buffer.readline()
metadata_list = file_metadata.split()
metadata = {}
metadata['name'] = station.strip()
metadata['latitude'] = float(metadata_list[0])
metadata['longitude'] = float(metadata_list[1])
metadata['elevation'] = float(metadata_list[2])
metadata['surfrad_version'] = int(metadata_list[-1])
metadata['tz'] = 'UTC'
data = pd.read_csv(file_buffer, delim_whitespace=True,
header=None, names=SURFRAD_COLUMNS)
file_buffer.close()
data = format_index(data)
missing = data == -9999.9
data = data.where(~missing, np.NaN)
if map_variables:
data.rename(columns=VARIABLE_MAP, inplace=True)
return data, metadata
def format_index(data):
"""Create UTC localized DatetimeIndex for the dataframe.
Parameters
----------
data: Dataframe
Must contain columns 'year', 'jday', 'hour' and
'minute'.
    Returns
    -------
data: Dataframe
Dataframe with a DatetimeIndex localized to UTC.
"""
year = data.year.apply(str)
jday = data.jday.apply(lambda x: '{:03d}'.format(x))
hours = data.hour.apply(lambda x: '{:02d}'.format(x))
minutes = data.minute.apply(lambda x: '{:02d}'.format(x))
index = pd.to_datetime(year + jday + hours + minutes, format="%Y%j%H%M")
data.index = index
data = data.tz_localize('UTC')
return data
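# A minimal illustrative usage sketch (not part of pvlib): read one daily
# SURFRAD file straight from the NOAA FTP archive.  The station directory
# and file name below follow the archive's usual naming scheme but are only
# an example and may need to be adjusted to a file that actually exists.
def _demo_read_surfrad():
    url = ('ftp://aftp.cmdl.noaa.gov/data/radiation/surfrad/'
           'Bondville_IL/2019/bon19001.dat')
    data, metadata = read_surfrad(url, map_variables=True)
    return metadata['name'], data['ghi'].mean()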
| bsd-3-clause |
mahiso/poloniexlendingbot | coinlendingbot/MarketAnalysis.py | 1 | 19413 | import logging
import os
import threading
import time
import traceback
from datetime import datetime
import pandas as pd
import sqlite3 as sqlite
from sqlite3 import Error
import numpy
from coinlendingbot.ExchangeApi import ApiError
import coinlendingbot.Configuration as Config
from coinlendingbot.Data import truncate
# Improvements
# [ ] Provide something that takes into account dust offers. (The golden cross works well on BTC, not slower markets)
# [ ] RE: above. Weighted rate.
# [ ] Add docstring to everything
# [ ] Unit tests
# NOTES
# * A possible solution for the dust problem is take the top 10 offers and if the offer amount is less than X% of the
# total available, ignore it as dust.
class MarketDataException(Exception):
pass
class MarketAnalysis(object):
def __init__(self, config, api):
self.logger = logging.getLogger(__name__)
self.currencies_to_analyse = config.get_currencies_list('analyseCurrencies', 'MarketAnalysis')
self.update_interval = int(config.get('MarketAnalysis', 'analyseUpdateInterval', 10, 1, 3600))
self.api = api
self.lending_style = int(config.get('MarketAnalysis', 'lendingStyle', 75, 1, 99))
self.recorded_levels = 10
self.modules_dir = os.path.dirname(os.path.realpath(__file__))
self.top_dir = os.path.dirname(self.modules_dir)
self.db_dir = os.path.join(self.top_dir, 'market_data')
self.recorded_levels = int(config.get('MarketAnalysis', 'recorded_levels', 3, 1, 100))
self.data_tolerance = float(config.get('MarketAnalysis', 'data_tolerance', 15, 10, 90))
self.MACD_long_win_seconds = int(config.get('MarketAnalysis', 'MACD_long_win_seconds',
60 * 30 * 1 * 1,
60 * 1 * 1 * 1,
60 * 60 * 24 * 7))
self.percentile_seconds = int(config.get('MarketAnalysis', 'percentile_seconds',
60 * 60 * 24 * 1,
60 * 60 * 1 * 1,
60 * 60 * 24 * 14))
if self.MACD_long_win_seconds > self.percentile_seconds:
keep_sec = self.MACD_long_win_seconds
else:
keep_sec = self.percentile_seconds
self.keep_history_seconds = int(config.get('MarketAnalysis', 'keep_history_seconds',
int(keep_sec * 1.1),
int(keep_sec * 1.1),
60 * 60 * 24 * 14))
self.MACD_short_win_seconds = int(config.get('MarketAnalysis', 'MACD_short_win_seconds',
int(self.MACD_long_win_seconds / 12),
1,
self.MACD_long_win_seconds / 2))
self.daily_min_multiplier = float(config.get('Daily_min', 'multiplier', 1.05, 1))
self.delete_thread_sleep = float(config.get('MarketAnalysis', 'delete_thread_sleep',
self.keep_history_seconds / 2,
60,
60 * 60 * 2))
self.exchange = config.get_exchange()
if len(self.currencies_to_analyse) != 0:
for currency in self.currencies_to_analyse:
try:
self.api.return_loan_orders(currency, 5)
except Exception as cur_ex:
raise Exception("ERROR: You entered an incorrect currency: '{0}' to analyse the market of, please "
"check your settings. Error message: {1}".format(currency, cur_ex))
def run(self):
"""
Main entry point to start recording data. This starts all the other threads.
"""
for cur in self.currencies_to_analyse:
db_con = self.create_connection(cur)
self.create_rate_table(db_con, self.recorded_levels)
db_con.close()
self.run_threads()
self.run_del_threads()
def run_threads(self):
"""
Start threads for each currency we want to record. (should be configurable later)
"""
for _ in ['thread1']:
for cur in self.currencies_to_analyse:
thread = threading.Thread(target=self.update_market_thread, args=(cur,))
                thread.daemon = True
thread.start()
def run_del_threads(self):
"""
Start thread to start the DB cleaning threads.
"""
for _ in ['thread1']:
for cur in self.currencies_to_analyse:
del_thread = threading.Thread(target=self.delete_old_data_thread, args=(cur, self.keep_history_seconds))
del_thread.daemon = False
del_thread.start()
def delete_old_data_thread(self, cur, seconds):
"""
Thread to clean the DB.
"""
while True:
try:
db_con = self.create_connection(cur)
self.delete_old_data(db_con, seconds)
except Exception as ex:
                self.logger.error("Error in MarketAnalysis: {0}\n".format(ex)
                                  + traceback.format_exc())
time.sleep(self.delete_thread_sleep)
def update_market_thread(self, cur, levels=None):
"""
This is where the main work is done for recording the market data. The loop will not exit and continuously
polls exchange for the current loans in the book.
        :param cur: The currency (database) to record market data for
:param levels: The depth of offered rates to store
"""
if levels is None:
levels = self.recorded_levels
db_con = self.create_connection(cur)
update_time = datetime.utcfromtimestamp(0)
while True:
try:
raw_data = self.api.return_loan_orders(cur, levels)
if (raw_data["update_time"] - update_time).total_seconds() > 0:
update_time = raw_data["update_time"]
market_data = []
for i in range(levels):
try:
market_data.append(str(raw_data['offers'][i]['rate']))
market_data.append(str(raw_data['offers'][i]['amount']))
except IndexError:
market_data.append("5")
market_data.append("0.1")
market_data.append('0') # Percentile field not being filled yet.
self.insert_into_db(db_con, market_data)
except ApiError as ex:
if '429' in str(ex):
self.logger.warning("Caught ERR_RATE_LIMIT, sleeping capture and increasing request delay. " +
"Current {0}ms".format(self.api.req_period))
time.sleep(130)
except Exception as ex:
self.logger.error("Error in returning data from exchange: {} : {}".format(ex, raw_data))
self.logger.debug(traceback.format_exc())
time.sleep(0.5)
def insert_into_db(self, db_con, market_data, levels=None):
if levels is None:
levels = self.recorded_levels
insert_sql = "INSERT INTO loans ("
for level in range(levels):
insert_sql += "rate{0}, amnt{0}, ".format(level)
insert_sql += "percentile) VALUES ({0});".format(','.join(market_data)) # percentile = 0
with db_con:
try:
db_con.execute(insert_sql)
except Exception as ex:
self.logger.error("Error inserting market data into DB: {}".format(ex))
def delete_old_data(self, db_con, seconds):
"""
Delete old data from the database
:param db_con: Connection to the database
:param seconds: The time in seconds of the oldest data to be kept
"""
del_time = int(time.time()) - seconds
with db_con:
query = "DELETE FROM loans WHERE unixtime < {0};".format(del_time)
cursor = db_con.cursor()
cursor.execute(query)
@staticmethod
def get_day_difference(date_time): # Will be a number of seconds since epoch
"""
Get the difference in days between the supplied date_time and now.
:param date_time: A python date time object
:return: The number of days that have elapsed since date_time
"""
date1 = datetime.fromtimestamp(float(date_time))
now = datetime.utcnow()
diff_days = (now - date1).days
return diff_days
def get_rate_list(self, cur, seconds):
"""
        Query the database (cur) for rates recorded between the supplied number of seconds ago and now.
        :param cur: The currency (database) to get rates for
:param seconds: The number of seconds between the oldest order returned and now.
:return: A pandas DataFrame object with named columns ('time', 'rate0', 'rate1',...)
"""
# Request more data from the DB than we need to allow for skipped seconds
request_seconds = int(seconds * 1.1)
full_list = Config.get_all_currencies()
if isinstance(cur, sqlite.Connection):
db_con = cur
else:
if cur not in full_list:
raise ValueError("{0} is not a valid currency, must be one of {1}".format(cur, full_list))
if cur not in self.currencies_to_analyse:
return []
db_con = self.create_connection(cur)
price_levels = ['rate0']
rates = self.get_rates_from_db(db_con, from_date=time.time() - request_seconds, price_levels=price_levels)
if len(rates) == 0:
return []
df = pd.DataFrame(rates)
columns = ['time']
columns.extend(price_levels)
try:
df.columns = columns
except Exception as ex:
self.logger.error("get_rate_list: cols: {0} rates:{1} db:{2}".format(columns, rates, db_con))
raise ex
# convert unixtimes to datetimes so we can resample
df.time = pd.to_datetime(df.time, unit='s')
# If we don't have enough data return df, otherwise the resample will fill out all values with the same data.
# Missing data tolerance allows for a percentage to be ignored and filled in by resampling.
if len(df) < seconds * (self.data_tolerance / 100):
return df
# Resample into 1 second intervals, average if we get two in the same second and fill any empty spaces with the
# previous value
df = df.resample('1s', on='time').mean().ffill()
return df
def get_analysis_seconds(self, method):
"""
        Gets the correct number of seconds to use for analysing data depending on the method being used.
"""
if method == 'percentile':
return self.percentile_seconds
elif method == 'MACD':
return self.MACD_long_win_seconds
def get_rate_suggestion(self, cur, rates=None, method='percentile'):
"""
Return the suggested rate from analysed data. This is the main method for retrieving data from this module.
Currently this only supports returning of a single value, the suggested rate. However this will be expanded to
suggest a lower and higher rate for spreads.
        :param cur: The currency (database) to get the suggested rate for
:param rates: This is used for unit testing only. It allows you to populate the data used for the suggestion.
:param method: The method by which you want to calculate the suggestion.
:return: A float with the suggested rate for the currency.
"""
error_msg = "WARN: Exception found when analysing markets, if this happens for more than a couple minutes " +\
"please create a Github issue so we can fix it. Otherwise, you can ignore it. Error"
try:
rates = self.get_rate_list(cur, self.get_analysis_seconds(method)) if rates is None else rates
if not isinstance(rates, pd.DataFrame):
raise ValueError("Rates must be a Pandas DataFrame")
if len(rates) == 0:
self.logger.info("Rate list not populated")
self.logger.debug("get_analysis_seconds: cur: {0} method:{1} rates:{2}"
.format(cur, method, rates))
return 0
if method == 'percentile':
return self.get_percentile(rates.rate0.values.tolist(), self.lending_style)
if method == 'MACD':
macd_rate = truncate(self.get_MACD_rate(cur, rates), 6)
self.logger.debug("Cur:{0}, MACD:{1:.6f}, Perc:{2:.6f}, Best:{3:.6f}"
.format(cur, macd_rate,
self.get_percentile(rates.rate0.values.tolist(), self.lending_style),
rates.rate0.iloc[-1]))
return macd_rate
except MarketDataException:
if method != 'percentile':
self.logger.warning("Caught exception during {0} analysis, using percentile for now".format(method))
return self.get_percentile(rates.rate0.values.tolist(), self.lending_style)
else:
raise
except Exception as ex:
self.logger.error("{}\n{}\n{}".format(error_msg, ex, traceback.format_exc()))
return 0
@staticmethod
def percentile(N, percent, key=lambda x: x):
"""
http://stackoverflow.com/questions/2374640/how-do-i-calculate-percentiles-with-python-numpy/2753343#2753343
Find the percentile of a list of values.
:parameter N: A list of values. Note N MUST BE already sorted.
:parameter percent: A float value from 0.0 to 1.0.
:parameter key: Optional key function to compute value from each element of N.
:return: Percentile of the values
"""
import math
if not N:
return None
k = (len(N) - 1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c - k)
d1 = key(N[int(c)]) * (k - f)
return d0 + d1
def get_percentile(self, rates, lending_style):
"""
Take a list of rates no matter what method is being used, simple list, no pandas / numpy array
"""
result = numpy.percentile(rates, int(lending_style))
result = truncate(result, 6)
return result
def get_MACD_rate(self, cur, rates_df):
"""
        "Golden cross" is a bit of a misnomer here: we compare the short term moving average against the long term
        moving average. If the short term average is above the long term one, the market is moving in a bullish
        manner and it's a good time to lend, so return the short term moving average (scaled with the multiplier).
        :param cur: The currency being analysed (used for logging)
:param rates_df: A pandas DataFrame with times and rates
        The short/long window lengths and the multiplier are taken from the instance configuration
        (MACD_short_win_seconds, MACD_long_win_seconds and daily_min_multiplier).
        :return: A float of the suggested, calculated rate
"""
if len(rates_df) < self.get_analysis_seconds('MACD') * (self.data_tolerance / 100):
self.logger.info("{0}: Need more data for analysis, still collecting. I have {1}/{2} records"
.format(cur, len(rates_df),
int(self.get_analysis_seconds('MACD') * (self.data_tolerance / 100))))
raise MarketDataException
short_rate = rates_df.rate0.tail(self.MACD_short_win_seconds).mean()
long_rate = rates_df.rate0.tail(self.MACD_long_win_seconds).mean()
self.logger.debug("Short higher" if short_rate > long_rate else "Long higher")
if short_rate > long_rate:
if rates_df.rate0.iloc[-1] < short_rate:
return short_rate * self.daily_min_multiplier
else:
return rates_df.rate0.iloc[-1] * self.daily_min_multiplier
else:
return long_rate * self.daily_min_multiplier
def create_connection(self, cur, db_path=None, db_type='sqlite3'):
"""
Create a connection to the sqlite DB. This will create a new file if one doesn't exist. We can use :memory:
here for db_path if we don't want to store the data on disk
:param cur: The currency (database) in the DB
:param db_path: DB directory
:return: Connection object or None
"""
if db_path is None:
prefix = Config.get_exchange()
db_path = os.path.join(self.db_dir, '{0}-{1}.db'.format(prefix, cur))
try:
con = sqlite.connect(db_path)
return con
except Error as ex:
            self.logger.error(ex)
def create_rate_table(self, db_con, levels):
"""
Create a new table to hold rate data.
:param db_con: Connection to the database
        Each currency is stored in its own database file, with one 'loans' table per currency.
:param levels: The depth of offered rates to store
"""
with db_con:
cursor = db_con.cursor()
create_table_sql = "CREATE TABLE IF NOT EXISTS loans (id INTEGER PRIMARY KEY AUTOINCREMENT," + \
"unixtime integer(4) not null default (strftime('%s','now')),"
for level in range(levels):
create_table_sql += "rate{0} FLOAT, ".format(level)
create_table_sql += "amnt{0} FLOAT, ".format(level)
create_table_sql += "percentile FLOAT);"
cursor.execute("PRAGMA journal_mode=wal")
cursor.execute(create_table_sql)
def get_rates_from_db(self, db_con, from_date=None, price_levels=['rate0']):
"""
Query the DB for all rates of a particular currency.
:param db_con: Connection to the (per-currency) database
:param from_date: The earliest data you want, specified in unix time (seconds since epoch)
:param price_levels: We record multiple price levels in the DB, the best offer being rate0
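For example, with price_levels=['rate0', 'rate1'] and from_date=1500000000 the
generated query is:
SELECT unixtime, rate0,rate1 FROM loans WHERE unixtime > 1500000000;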
"""
with db_con:
cursor = db_con.cursor()
query = "SELECT unixtime, {0} FROM loans ".format(",".join(price_levels))
if from_date is not None:
query += "WHERE unixtime > {0}".format(from_date)
query += ";"
cursor.execute(query)
return cursor.fetchall()
| mit |
btabibian/scikit-learn | benchmarks/bench_plot_nmf.py | 8 | 15618 | """
Benchmarks of Non-Negative Matrix Factorization
"""
# Authors: Tom Dupre la Tour (benchmark)
# Chih-Jen Lin (original projected gradient NMF implementation)
# Anthony Di Franco (projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import warnings
import numbers
import numpy as np
import matplotlib.pyplot as plt
import pandas
from sklearn.utils.testing import ignore_warnings
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition.nmf import NMF
from sklearn.decomposition.nmf import _initialize_nmf
from sklearn.decomposition.nmf import _beta_divergence
from sklearn.decomposition.nmf import INTEGER_TYPES, _check_init
from sklearn.externals.joblib import Memory
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot, squared_norm
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted, check_non_negative
mem = Memory(cachedir='.', verbose=0)
###################
# Start of _PGNMF #
###################
# This class implements a projected gradient solver for the NMF.
# The projected gradient solver was removed from scikit-learn in version 0.19,
# and a simplified copy is used here for comparison purpose only.
# It is not tested, and it may change or disappear without notice.
def _norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return np.sqrt(squared_norm(x))
def _nls_subproblem(X, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
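Examples
--------
A minimal shape-level sketch with random non-negative data (values are
illustrative only):
>>> rng = np.random.RandomState(0)
>>> X = np.abs(rng.randn(6, 5))
>>> W = np.abs(rng.randn(6, 3))
>>> H0 = np.abs(rng.randn(3, 5))
>>> H, grad, n_iter = _nls_subproblem(X, W, H0, tol=1e-4, max_iter=200)
>>> H.shape
(3, 5)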
"""
WtX = safe_sparse_dot(W.T, X)
WtW = np.dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtX
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if _norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.",
ConvergenceWarning)
return H, grad, n_iter
def _fit_projected_gradient(X, W, H, tol, max_iter, nls_max_iter, alpha,
l1_ratio):
gradW = (np.dot(W, np.dot(H, H.T)) -
safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H) -
safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
Wt, gradWt, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W, gradW = Wt.T, gradWt.T
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
Wt, _, _ = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W = Wt.T
return W, H, n_iter
class _PGNMF(NMF):
"""Non-Negative Matrix Factorization (NMF) with projected gradient solver.
This class is private and for comparison purpose only.
It may change or disappear without notice.
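A usage sketch mirroring the public NMF interface (data and shapes are
illustrative only):
>>> X = np.abs(np.random.RandomState(0).randn(20, 8))
>>> model = _PGNMF(n_components=3, tol=1e-4, max_iter=200)
>>> W = model.fit_transform(X)
>>> W.shape, model.components_.shape
((20, 3), (3, 8))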
"""
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., nls_max_iter=10):
self.nls_max_iter = nls_max_iter
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
def fit(self, X, y=None, **params):
self.fit_transform(X, **params)
return self
def transform(self, X):
check_is_fitted(self, 'components_')
H = self.components_
W, _, self.n_iter_ = self._fit_transform(X, H=H, update_H=False)
return W
def inverse_transform(self, W):
check_is_fitted(self, 'components_')
return np.dot(W, self.components_)
def fit_transform(self, X, y=None, W=None, H=None):
W, H, self.n_iter = self._fit_transform(X, W=W, H=H, update_H=True)
self.components_ = H
return W
def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
if (not isinstance(n_components, INTEGER_TYPES) or
n_components <= 0):
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
if not isinstance(self.max_iter, INTEGER_TYPES) or self.max_iter < 0:
raise ValueError("Maximum number of iterations must be a positive "
"integer; got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
# check W and H, or initialize them
if self.init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=self.init,
random_state=self.random_state)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(
X, W, H, self.tol, self.max_iter, self.nls_max_iter,
self.alpha, self.l1_ratio)
else: # transform
Wt, _, n_iter = _nls_subproblem(X.T, H.T, W.T, self.tol,
self.nls_max_iter,
alpha=self.alpha,
l1_ratio=self.l1_ratio)
W = Wt.T
if n_iter == self.max_iter and self.tol > 0:
warnings.warn("Maximum number of iteration %d reached. Increase it"
" to improve convergence." % self.max_iter,
ConvergenceWarning)
return W, H, n_iter
#################
# End of _PGNMF #
#################
def plot_results(results_df, plot_name):
if results_df is None:
return None
plt.figure(figsize=(16, 6))
colors = 'bgr'
markers = 'ovs'
ax = plt.subplot(1, 3, 1)
for i, init in enumerate(np.unique(results_df['init'])):
plt.subplot(1, 3, i + 1, sharex=ax, sharey=ax)
for j, method in enumerate(np.unique(results_df['method'])):
mask = np.logical_and(results_df['init'] == init,
results_df['method'] == method)
selected_items = results_df[mask]
plt.plot(selected_items['time'], selected_items['loss'],
color=colors[j % len(colors)], ls='-',
marker=markers[j % len(markers)],
label=method)
plt.legend(loc=0, fontsize='x-small')
plt.xlabel("Time (s)")
plt.ylabel("loss")
plt.title("%s" % init)
plt.suptitle(plot_name, fontsize=16)
@ignore_warnings(category=ConvergenceWarning)
# use joblib to cache the results.
# X_shape is specified in arguments for avoiding hashing X
@mem.cache(ignore=['X', 'W0', 'H0'])
def bench_one(name, X, W0, H0, X_shape, clf_type, clf_params, init,
n_components, random_state):
W = W0.copy()
H = H0.copy()
clf = clf_type(**clf_params)
st = time()
W = clf.fit_transform(X, W=W, H=H)
end = time()
H = clf.components_
this_loss = _beta_divergence(X, W, H, 2.0, True)
duration = end - st
return this_loss, duration
def run_bench(X, clfs, plot_name, n_components, tol, alpha, l1_ratio):
start = time()
results = []
for name, clf_type, iter_range, clf_params in clfs:
print("Training %s:" % name)
for rs, init in enumerate(('nndsvd', 'nndsvdar', 'random')):
print(" %s %s: " % (init, " " * (8 - len(init))), end="")
W, H = _initialize_nmf(X, n_components, init, 1e-6, rs)
for max_iter in iter_range:
clf_params['alpha'] = alpha
clf_params['l1_ratio'] = l1_ratio
clf_params['max_iter'] = max_iter
clf_params['tol'] = tol
clf_params['random_state'] = rs
clf_params['init'] = 'custom'
clf_params['n_components'] = n_components
this_loss, duration = bench_one(name, X, W, H, X.shape,
clf_type, clf_params,
init, n_components, rs)
init_name = "init='%s'" % init
results.append((name, this_loss, duration, init_name))
# print("loss: %.6f, time: %.3f sec" % (this_loss, duration))
print(".", end="")
sys.stdout.flush()
print(" ")
# Use a pandas DataFrame to organize the results
results_df = pandas.DataFrame(results,
columns="method loss time init".split())
print("Total time = %0.3f sec\n" % (time() - start))
# plot the results
plot_results(results_df, plot_name)
return results_df
def load_20news():
print("Loading 20 newsgroups dataset")
print("-----------------------------")
from sklearn.datasets import fetch_20newsgroups
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data)
return tfidf
def load_faces():
print("Loading Olivetti face dataset")
print("-----------------------------")
from sklearn.datasets import fetch_olivetti_faces
faces = fetch_olivetti_faces(shuffle=True)
return faces.data
def build_clfs(cd_iters, pg_iters, mu_iters):
clfs = [("Coordinate Descent", NMF, cd_iters, {'solver': 'cd'}),
("Projected Gradient", _PGNMF, pg_iters, {'solver': 'pg'}),
("Multiplicative Update", NMF, mu_iters, {'solver': 'mu'}),
]
return clfs
if __name__ == '__main__':
alpha = 0.
l1_ratio = 0.5
n_components = 10
tol = 1e-15
# first benchmark on 20 newsgroup dataset: sparse, shape(11314, 39116)
plot_name = "20 Newsgroups sparse dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 6)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_20news = load_20news()
run_bench(X_20news, clfs, plot_name, n_components, tol, alpha, l1_ratio)
# second benchmark on Olivetti faces dataset: dense, shape(400, 4096)
plot_name = "Olivetti Faces dense dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 12)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_faces = load_faces()
run_bench(X_faces, clfs, plot_name, n_components, tol, alpha, l1_ratio,)
plt.show()
| bsd-3-clause |
walterreade/scikit-learn | examples/svm/plot_weighted_samples.py | 95 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of each point
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# fit the model, once with the modified sample weights and once without
# (the unweighted fit is kept for reference)
clf_weights = svm.SVC()
clf_weights.fit(X, y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
nesterione/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
dominicelse/scipy | scipy/interpolate/interpolate.py | 7 | 100897 | """ Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
import warnings
import functools
import operator
import numpy as np
from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
dot, ravel, poly1d, asarray, intp)
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
from scipy._lib.six import xrange, integer_types, string_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
from ._bsplines import make_interp_spline, BSpline
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def lagrange(x, w):
"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w`, returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : numpy.poly1d instance
The Lagrange interpolating polynomial.
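Examples
--------
Interpolate f(x) = x**3 through three points (values chosen so the result is
easy to check by hand); the resulting quadratic is 3*x**2 - 2*x:
>>> from scipy.interpolate import lagrange
>>> x = np.array([0, 1, 2])
>>> w = x**3
>>> poly = lagrange(x, w)
>>> float(poly(3))
21.0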
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=nan)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Note that calling `interp2d` with NaNs present in input values results in
undefined behaviour.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one dimension version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
def _do_extrapolate(fill_value):
"""Helper to check if fill_value == "extrapolate" without warnings"""
return (isinstance(fill_value, string_types) and
fill_value == 'extrapolate')
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Note that calling `interp1d` with NaNs present in input values results in
undefined behaviour.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'zero', 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of zeroth, first, second or third order) or as an
integer specifying the order of the spline interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised unless `fill_value="extrapolate"`.
fill_value : array-like or (array-like, array_like) or "extrapolate", optional
- if a ndarray (or float), this value will be used to fill in for
requested points outside of the data range. If not provided, then
the default is NaN. The array-like must broadcast properly to the
dimensions of the non-interpolation axes.
- If a two-element tuple, then the first element is used as a
fill value for ``x_new < x[0]`` and the second element is used for
``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
list or ndarray, regardless of shape) is taken to be a single
array-like argument meant to be used for both bounds as
``below, above = fill_value, fill_value``.
.. versionadded:: 0.17.0
- If "extrapolate", then points outside the data range will be
extrapolated.
.. versionadded:: 0.17.0
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
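Out-of-range points can be extrapolated instead of raising an error by passing
``fill_value="extrapolate"`` (a short sketch reusing the data above):
>>> f2 = interpolate.interp1d(x, y, fill_value="extrapolate")
>>> ynew2 = f2(np.arange(9, 11, 0.5)) # points beyond x[-1] are extrapolated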
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=None, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.bounds_error = bounds_error # used by fill_value setter
self.copy = copy
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'nearest': 0, 'zero': 0, 'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
self._y = self._reshape_yi(self.y)
self.x = x
del y, x # clean up namespace to prevent misuse; use attributes
self._kind = kind
self.fill_value = fill_value # calls the setter, can modify bounds_err
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: http://docs.python.org/2/reference/datamodel.html
if kind in ('linear', 'nearest'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
# Do division before addition to prevent possible integer
# overflow
self.x_bds = self.x / 2.0
self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
self._call = self.__class__._call_nearest
else:
# Check if we can delegate to numpy.interp (2x-10x faster).
cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
cond = cond and self.y.ndim == 1
cond = cond and not _do_extrapolate(fill_value)
if cond:
self._call = self.__class__._call_linear_np
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
rewrite_nan = False
xx, yy = self.x, self._y
if order > 1:
# Quadratic or cubic spline. If input contains even a single
# nan, then the output is all nans. We cannot just feed data
# with nans to make_interp_spline because it calls LAPACK.
# So, we make up a bogus x and y with no nans and use it
# to get the correct shape of the output, which we then fill
# with nans.
# For slinear or zero order spline, we just pass nans through.
if np.isnan(self.x).any():
xx = np.linspace(min(self.x), max(self.x), len(self.x))
rewrite_nan = True
if np.isnan(self._y).any():
yy = np.ones_like(self._y)
rewrite_nan = True
self._spline = make_interp_spline(xx, yy, k=order,
check_finite=False)
if rewrite_nan:
self._call = self.__class__._call_nan_spline
else:
self._call = self.__class__._call_spline
if len(self.x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
@property
def fill_value(self):
# backwards compat: mimic a public attribute
return self._fill_value_orig
@fill_value.setter
def fill_value(self, fill_value):
# extrapolation only works for nearest neighbor and linear methods
if _do_extrapolate(fill_value):
if self.bounds_error:
raise ValueError("Cannot extrapolate and raise "
"at the same time.")
self.bounds_error = False
self._extrapolate = True
else:
broadcast_shape = (self.y.shape[:self.axis] +
self.y.shape[self.axis + 1:])
if len(broadcast_shape) == 0:
broadcast_shape = (1,)
# it's either a pair (_below_range, _above_range) or a single value
# for both above and below range
if isinstance(fill_value, tuple) and len(fill_value) == 2:
below_above = [np.asarray(fill_value[0]),
np.asarray(fill_value[1])]
names = ('fill_value (below)', 'fill_value (above)')
for ii in range(2):
below_above[ii] = _check_broadcast_up_to(
below_above[ii], broadcast_shape, names[ii])
else:
fill_value = np.asarray(fill_value)
below_above = [_check_broadcast_up_to(
fill_value, broadcast_shape, 'fill_value')] * 2
self._fill_value_below, self._fill_value_above = below_above
self._extrapolate = False
if self.bounds_error is None:
self.bounds_error = True
# backwards compat: fill_value was a public attr; make it writeable
self._fill_value_orig = fill_value
def _call_linear_np(self, x_new):
# Note that out-of-bounds values are taken care of in self._evaluate
return np.interp(x_new, self.x, self.y)
def _call_linear(self, x_new):
# 2. Find where in the original data the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_spline(self, x_new):
return self._spline(x_new)
def _call_nan_spline(self, x_new):
out = self._spline(x_new)
out[...] = np.nan
return out
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw an error,
# or return boolean masks indicating the out-of-bounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
y_new = self._call(self, x_new)
if not self._extrapolate:
below_bounds, above_bounds = self._check_bounds(x_new)
if len(y_new) > 0:
# Note fill_value must be broadcast up to the proper size
# and flattened to work here
y_new[below_bounds] = self._fill_value_below
y_new[above_bounds] = self._fill_value_above
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
return below_bounds, above_bounds
class _PPolyBase(object):
"""Base class for piecewise polynomials."""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
elif extrapolate != 'periodic':
extrapolate = bool(extrapolate)
self.extrapolate = extrapolate
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("%s must be between 0 and %s" % (axis, c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
dx = np.diff(self.x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` must be strictly increasing or decreasing.")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=None):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals. Note that
the first additional interval will be formed using one of the
`self.x` end points.
x : ndarray, size (m,)
Additional breakpoints. Must be sorted in the same order as
`self.x` and either to the right or to the left of the current
breakpoints.
right
Deprecated argument. Has no effect.
.. deprecated:: 0.19
"""
if right is not None:
warnings.warn("`right` is deprecated and will be removed.")
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if c.size == 0:
return
dx = np.diff(x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` is not sorted.")
if self.x[-1] >= self.x[0]:
if not x[-1] >= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] >= self.x[-1]:
action = 'append'
elif x[-1] <= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
else:
if not x[-1] <= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] <= self.x[-1]:
action = 'append'
elif x[-1] >= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if action == 'append':
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
elif action == 'prepend':
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative.
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
# With periodic extrapolation we map x to the segment
# [self.x[0], self.x[-1]].
if extrapolate == 'periodic':
x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
extrapolate = False
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
local power basis::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
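Examples
--------
A minimal sketch: represent x**2 on the single interval [0, 2] in the local
power basis (coefficients are ordered from the highest power down):
>>> from scipy.interpolate import PPoly
>>> p = PPoly(np.array([[1.0], [0.0], [0.0]]), np.array([0.0, 2.0]))
>>> float(p(1.5))
2.25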
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - n representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu, :].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
# construct a compatible polynomial
return self.construct_fast(c, self.x, extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
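Examples
--------
For instance, for the single-interval representation of x**2 on [0, 2]
(coefficients [1, 0, 0] in the local power basis) the integral over [0, 2]
is 8/3:
>>> from scipy.interpolate import PPoly
>>> p = PPoly(np.array([[1.0], [0.0], [0.0]]), np.array([0.0, 2.0]))
>>> round(float(p.integrate(0, 2)), 3)
2.667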
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
# Compute the integral.
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xe, False, out=range_int)
range_int *= n_periods
else:
range_int.fill(0)
# Map a to [xs, xe], b is always a + left.
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and then from xs over the remaining part.
remainder_int = np.empty_like(range_int)
if b <= xe:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, xe, False, out=remainder_int)
range_int += remainder_int
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xs + left + a - xe, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate), out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
def solve(self, y=0., discontinuity=True, extrapolate=None):
"""
Find real solutions of the equation ``pp(x) == y``.
Parameters
----------
y : float, optional
Right-hand side. Default is zero.
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.roots()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
y = float(y)
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, y, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep` or a BSpline object.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
if extrapolate is None:
extrapolate = tck.extrapolate
else:
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
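Examples
--------
A quick consistency sketch: the quadratic Bernstein polynomial with
coefficients [1, 2, 3] on [0, 1] evaluates to 2 at x = 0.5, and so does its
power-basis form:
>>> from scipy.interpolate import BPoly, PPoly
>>> bp = BPoly([[1.0], [2.0], [3.0]], [0, 1])
>>> pp = PPoly.from_bernstein_basis(bp)
>>> float(pp(0.5))
2.0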
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**a * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""Piecewise polynomial in terms of coefficients and breakpoints.
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
coefficient.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
           vol 2011, article ID 829543, :doi:`10.1155/2011/829543`.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
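    As an illustrative check of the explicit form above, at ``x = 0.5`` it
    evaluates to ``0.25 + 1.0 + 0.75 = 2.0``:
    >>> float(bp(0.5))
    2.0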
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k - nu representing the derivative of
this polynomial.
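        Examples
        --------
        A small illustrative check (coefficients chosen for this sketch): the
        polynomial ``BPoly([[1], [2], [3]], [0, 1])`` equals ``1 + 2*x``, so
        its derivative is the constant 2.
        >>> import numpy as np
        >>> from scipy.interpolate import BPoly
        >>> bp = BPoly([[1], [2], [3]], [0, 1])
        >>> bool(np.allclose(bp.derivative()(0.3), 2.0))
        True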
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k + nu representing the
antiderivative of this polynomial.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the
# breakpoint). Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(c2, x, extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. If 'periodic', periodic
extrapolation is used. If None (default), use `self.extrapolate`.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
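        Examples
        --------
        An illustrative check (coefficients chosen for this sketch): for
        ``BPoly([[1], [2], [3]], [0, 1])``, i.e. ``(1-x)**2 + 4*x*(1-x) +
        3*x**2``, the integral over ``[0, 1]`` is ``1/3 + 2/3 + 1 = 2``.
        >>> from scipy.interpolate import BPoly
        >>> bp = BPoly([[1], [2], [3]], [0, 1])
        >>> float(bp.integrate(0, 1))
        2.0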
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is None:
extrapolate = self.extrapolate
# ib.extrapolate shouldn't be 'periodic', it is converted to
        # False for 'periodic' in the antiderivative() call.
if extrapolate != 'periodic':
ib.extrapolate = extrapolate
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
# For simplicity and clarity convert to a <= b case.
if a <= b:
sign = 1
else:
a, b = b, a
sign = -1
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
res = n_periods * (ib(xe) - ib(xs))
# Map a and b to [xs, xe].
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
if b <= xe:
res += ib(b) - ib(a)
else:
res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
return sign * res
else:
return ib(b) - ib(a)
def extend(self, c, x, right=None):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
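        Examples
        --------
        An illustrative round-trip (a sketch): convert the power-basis
        polynomial ``x**2`` on ``[0, 1]`` and evaluate both representations
        at the same point.
        >>> import numpy as np
        >>> from scipy.interpolate import PPoly, BPoly
        >>> pp = PPoly([[1.0], [0.0], [0.0]], [0, 1])   # x**2 on [0, 1]
        >>> bp = BPoly.from_power_basis(pp)
        >>> bool(np.allclose(pp(0.3), bp(0.3)))
        True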
"""
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
        if np.any(xi[1:] - xi[:-1] <= 0):  # consecutive breakpoints must increase
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, (integer_types, np.integer)):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
                n1 = min(n - n2, len(y1))
if n1+n2 != n:
mesg = ("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" % (
xi[i], len(y1), xi[i+1], len(y2), orders[i]))
raise ValueError(mesg)
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
r"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on `[xa, xb]` and having the values and derivatives at the
endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at ``xb``.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
This uses several facts from life of Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
        then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
        Iterating the latter, one finds for the q-th derivative
        .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
        with
        .. math:: Q_a = \sum_{j=0}^{q} (-1)^{j+q} comb(q, j) c_{j+a}
        This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At `x = xb` it's the same with `a = n - q`.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating) or
np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
@staticmethod
def _raise_degree(c, d):
r"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
class NdPPoly(object):
"""
Piecewise tensor product polynomial
The value at point `xp = (x', y', z', ...)` is evaluated by first
computing the interval indices `i` such that::
x[0][i[0]] <= x' < x[0][i[0]+1]
x[1][i[1]] <= y' < x[1][i[1]+1]
...
and then computing::
S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
* (xp[0] - x[0][i[0]])**m0
* ...
* (xp[n] - x[n][i[n]])**mn
for m0 in range(k[0]+1)
...
for mn in range(k[n]+1))
where ``k[j]`` is the degree of the polynomial in dimension j. This
representation is the piecewise multivariate power basis.
Parameters
----------
c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
Polynomial coefficients, with polynomial order `kj` and
        `mj` intervals for each dimension `j` (so that `x[j]` has `mj+1` breakpoints).
x : ndim-tuple of ndarrays, shapes (mj+1,)
Polynomial breakpoints for each dimension. These must be
sorted in increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : tuple of ndarrays
Breakpoints.
c : ndarray
Coefficients of the polynomials.
Methods
-------
__call__
construct_fast
See also
--------
PPoly : piecewise polynomials in 1D
Notes
-----
High-order polynomials in the power basis can be numerically
unstable.
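    Examples
    --------
    A minimal illustrative construction (values chosen for this sketch): one
    2-D cell ``[0, 1] x [0, 1]`` holding the polynomial ``p(x, y) = x*y``;
    as in `PPoly`, the leading coefficient multiplies the highest powers.
    >>> import numpy as np
    >>> from scipy.interpolate import NdPPoly
    >>> c = np.zeros((2, 2, 1, 1))
    >>> c[0, 0, 0, 0] = 1.0           # coefficient of x**1 * y**1
    >>> p = NdPPoly(c, ([0.0, 1.0], [0.0, 1.0]))
    >>> float(p(np.array([[0.5, 0.25]]))[0])
    0.125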
"""
def __init__(self, c, x, extrapolate=None):
self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
self.c = np.asarray(c)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
ndim = len(self.x)
if any(v.ndim != 1 for v in self.x):
raise ValueError("x arrays must all be 1-dimensional")
if any(v.size < 2 for v in self.x):
raise ValueError("x arrays must all contain at least 2 points")
if c.ndim < 2*ndim:
raise ValueError("c must have at least 2*len(x) dimensions")
if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
raise ValueError("x-coordinates are not in increasing order")
if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
raise ValueError("x and c do not agree on the number of intervals")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
@classmethod
def construct_fast(cls, c, x, extrapolate=None):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
def _ensure_c_contiguous(self):
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
if not isinstance(self.x, tuple):
self.x = tuple(self.x)
def __call__(self, x, nu=None, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
nu : tuple, optional
Orders of derivatives to evaluate. Each must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
x = _ndim_coords_from_arrays(x)
x_shape = x.shape
x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)
if nu is None:
nu = np.zeros((ndim,), dtype=np.intc)
else:
nu = np.asarray(nu, dtype=np.intc)
if nu.ndim != 1 or nu.shape[0] != ndim:
raise ValueError("invalid number of derivative orders nu")
dim1 = prod(self.c.shape[:ndim])
dim2 = prod(self.c.shape[ndim:2*ndim])
dim3 = prod(self.c.shape[2*ndim:])
ks = np.array(self.c.shape[:ndim], dtype=np.intc)
out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
self.x,
ks,
x,
nu,
bool(extrapolate),
out)
return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
def _derivative_inplace(self, nu, axis):
"""
Compute 1D derivative along a selected dimension in-place
        May result in a non-contiguous c array.
"""
if nu < 0:
return self._antiderivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
# reduce order
if nu == 0:
# noop
return
else:
sl = [slice(None)]*ndim
sl[axis] = slice(None, -nu, None)
c2 = self.c[sl]
if c2.shape[axis] == 0:
# derivative of order 0 is zero
shp = list(c2.shape)
shp[axis] = 1
c2 = np.zeros(shp, dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
sl = [None]*c2.ndim
sl[axis] = slice(None)
c2 *= factor[sl]
self.c = c2
def _antiderivative_inplace(self, nu, axis):
"""
Compute 1D antiderivative along a selected dimension
        May result in a non-contiguous c array.
"""
if nu <= 0:
return self._derivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
perm = list(range(ndim))
perm[0], perm[axis] = perm[axis], perm[0]
perm = perm + list(range(ndim, self.c.ndim))
c = self.c.transpose(perm)
c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
dtype=c.dtype)
c2[:-nu] = c
# divide by the correct rising factorials
factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
perm2 = list(range(c2.ndim))
perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
c2 = c2.transpose(perm2)
c2 = c2.copy()
_ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
self.x[axis], nu-1)
c2 = c2.transpose(perm2)
c2 = c2.transpose(perm)
# Done
self.c = c2
def derivative(self, nu):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the antiderivative is returned.
Returns
-------
pp : NdPPoly
Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
representing the derivative of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals in each dimension are
considered half-open, ``[a, b)``, except for the last interval
which is closed ``[a, b]``.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._derivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def antiderivative(self, nu):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._antiderivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def integrate_1d(self, a, b, axis, extrapolate=None):
r"""
Compute NdPPoly representation for one dimensional definite integral
The result is a piecewise polynomial representing the integral:
.. math::
p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
where the dimension integrated over is specified with the
`axis` parameter.
Parameters
----------
a, b : float
Lower and upper bound for integration.
axis : int
Dimension over which to compute the 1D integrals
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : NdPPoly or array-like
Definite integral of the piecewise polynomial over [a, b].
If the polynomial was 1-dimensional, an array is returned,
otherwise, an NdPPoly object.
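        Examples
        --------
        An illustrative check (same toy polynomial as in the class docstring,
        ``p(x, y) = x*y`` on a single cell): integrating out ``x`` over
        ``[0, 1]`` leaves the 1-D polynomial ``y/2``.
        >>> import numpy as np
        >>> from scipy.interpolate import NdPPoly
        >>> c = np.zeros((2, 2, 1, 1))
        >>> c[0, 0, 0, 0] = 1.0
        >>> p = NdPPoly(c, ([0.0, 1.0], [0.0, 1.0]))
        >>> p1 = p.integrate_1d(0, 1, axis=0)
        >>> bool(np.allclose(p1(np.array([[0.5]])), 0.25))
        True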
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
axis = int(axis) % ndim
# reuse 1D integration routines
c = self.c
swap = list(range(c.ndim))
swap.insert(0, swap[axis])
del swap[axis + 1]
swap.insert(1, swap[ndim + axis])
del swap[ndim + axis + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
self.x[axis],
extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
# Construct result
if ndim == 1:
return out.reshape(c.shape[2:])
else:
c = out.reshape(c.shape[2:])
x = self.x[:axis] + self.x[axis+1:]
return self.construct_fast(c, x, extrapolate=extrapolate)
def integrate(self, ranges, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
ranges : ndim-tuple of 2-tuples float
Sequence of lower and upper bounds for each dimension,
``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over
[a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
"""
ndim = len(self.x)
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
if not hasattr(ranges, '__len__') or len(ranges) != ndim:
raise ValueError("Range not a sequence of correct length")
self._ensure_c_contiguous()
# Reuse 1D integration routine
c = self.c
for n, (a, b) in enumerate(ranges):
swap = list(range(c.ndim))
swap.insert(1, swap[ndim - n])
del swap[ndim - n + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
c = out.reshape(c.shape[2:])
return c
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x, y, z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
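    The out-of-bounds behaviour can be illustrated on the same grid: with
    ``bounds_error=False`` and the default ``fill_value`` of NaN, points
    outside the grid evaluate to NaN instead of raising an error.
    >>> interp_nan = RegularGridInterpolator((x, y, z), data,
    ...                                      bounds_error=False)
    >>> bool(np.isnan(interp_nan([0.0, 0.0, 0.0]))[0])
    True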
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices,
norm_distances,
out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices,
norm_distances,
out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
        # each i and i+1 represents an edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[idx_res]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
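    Examples
    --------
    Illustrative usage on a tiny grid (grid and values chosen for this
    sketch); linear interpolation reproduces the affine function
    ``f(x, y) = x + 2*y`` exactly.
    >>> import numpy as np
    >>> from scipy.interpolate import interpn
    >>> x = np.array([0.0, 1.0, 2.0])
    >>> y = np.array([0.0, 1.0])
    >>> values = x[:, None] + 2 * y[None, :]
    >>> float(interpn((x, y), values, [0.5, 0.25])[0])
    1.0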
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method spline2fd can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method spline2fd does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method spline2fd can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in xrange(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
# The 3 private functions below can be called by splmake().
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
# construct Bmatrix, and Jmatrix
# e = J*c
# minimize norm(e,2) given B*c=yk
# if desired B can be given
# conds is ignored
N = len(xk)-1
K = order
if B is None:
B = _fitpack._bsplmat(order, xk)
J = _fitpack._bspldismat(order, xk)
u, s, vh = scipy.linalg.svd(B)
ind = K-1
V2 = vh[-ind:,:].T
V1 = vh[:-ind,:].T
A = dot(J.T,J)
tmp = dot(V2.T,A)
Q = dot(tmp,V2)
p = scipy.linalg.solve(Q, tmp)
tmp = dot(V2,p)
tmp = np.eye(N+K) - tmp
tmp = dot(tmp,V1)
tmp = dot(tmp,np.diag(1.0/s))
tmp = dot(tmp,u.T)
return _dot0(tmp, yk)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# Remove the 3 private functions above as well when removing splmake
@np.deprecate(message="splmake is deprecated in scipy 0.19.0, "
"use make_interp_spline instead.")
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
"""
Return a representation of a spline given data-points at internal knots
Parameters
----------
xk : array_like
The input array of x values of rank 1
yk : array_like
The input array of y values of rank N. `yk` can be an N-d array to
represent more than one curve, through the same `xk` points. The first
dimension is assumed to be the interpolating dimension and is the same
        length as `xk`.
order : int, optional
Order of the spline
kind : str, optional
        Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
        'periodic', 'symmetric', 'user', or 'mixed'; it is ignored if order < 2.
    conds : optional
        Extra conditions for the fit; only used when ``kind='user'``, where it
        must be a tuple ``(lh, rh)`` of additional equations appended to the
        constraint matrix.
Returns
-------
splmake : tuple
Return a (`xk`, `cvals`, `k`) representation of a spline given
data-points where the (internal) knots are at the data-points.
"""
yk = np.asanyarray(yk)
order = int(order)
if order < 0:
raise ValueError("order must not be negative")
if order == 0:
return xk, yk[:-1], order
elif order == 1:
return xk, yk, order
try:
func = eval('_find_%s' % kind)
except:
raise NotImplementedError
# the constraint matrix
B = _fitpack._bsplmat(order, xk)
coefs = func(xk, yk, order, conds, B)
return xk, coefs, order
@np.deprecate(message="spleval is deprecated in scipy 0.19.0, "
"use BSpline instead.")
def spleval(xck, xnew, deriv=0):
"""
Evaluate a fixed spline represented by the given tuple at the new x-values
The `xj` values are the interior knot points. The approximation
region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
should have length N+k where `k` is the order of the spline.
Parameters
----------
(xj, cvals, k) : tuple
Parameters that define the fixed spline
xj : array_like
Interior knot points
    cvals : array_like
        Spline coefficients, as returned by `splmake`
k : int
Order of the spline
xnew : array_like
Locations to calculate spline
    deriv : int
        Order of the derivative to evaluate (default 0, the spline values).
Returns
-------
spleval : ndarray
If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
`xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
providing the interpolation of multiple curves.
Notes
-----
    Internally, `k`-1 additional knot points are added on either side of
the spline.
"""
(xj, cvals, k) = xck
oldshape = np.shape(xnew)
xx = np.ravel(xnew)
sh = cvals.shape[1:]
res = np.empty(xx.shape + sh, dtype=cvals.dtype)
for index in np.ndindex(*sh):
sl = (slice(None),) + index
if issubclass(cvals.dtype.type, np.complexfloating):
res[sl].real = _fitpack._bspleval(xx,xj, cvals.real[sl], k, deriv)
res[sl].imag = _fitpack._bspleval(xx,xj, cvals.imag[sl], k, deriv)
else:
res[sl] = _fitpack._bspleval(xx, xj, cvals[sl], k, deriv)
res.shape = oldshape + sh
return res
@np.deprecate(message="spltopp is deprecated in scipy 0.19.0, "
"use PPoly.from_spline instead.")
def spltopp(xk, cvals, k):
"""Return a piece-wise polynomial object from a fixed-spline tuple."""
return ppform.fromspline(xk, cvals, k)
@np.deprecate(message="spline is deprecated in scipy 0.19.0, "
"use Bspline class instead.")
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
"""
Interpolate a curve at new points using a spline fit
Parameters
----------
xk, yk : array_like
The x and y values that define the curve.
xnew : array_like
The x values where spline should estimate the y values.
order : int
Default is 3.
kind : string
One of {'smoothest'}
    conds : optional
        Extra conditions passed through to `splmake`; ignored for the default
        'smoothest' kind.
Returns
-------
spline : ndarray
An array of y values; the spline evaluated at the positions `xnew`.
"""
return spleval(splmake(xk, yk, order=order, kind=kind, conds=conds), xnew)
| bsd-3-clause |
perimosocordiae/scipy | scipy/spatial/transform/_rotation_spline.py | 12 | 14054 | import numpy as np
from scipy.linalg import solve_banded
from .rotation import Rotation
def _create_skew_matrix(x):
"""Create skew-symmetric matrices corresponding to vectors.
Parameters
----------
x : ndarray, shape (n, 3)
Set of vectors.
Returns
-------
ndarray, shape (n, 3, 3)
"""
result = np.zeros((len(x), 3, 3))
result[:, 0, 1] = -x[:, 2]
result[:, 0, 2] = x[:, 1]
result[:, 1, 0] = x[:, 2]
result[:, 1, 2] = -x[:, 0]
result[:, 2, 0] = -x[:, 1]
result[:, 2, 1] = x[:, 0]
return result
def _matrix_vector_product_of_stacks(A, b):
"""Compute the product of stack of matrices and vectors."""
return np.einsum("ijk,ik->ij", A, b)
def _angular_rate_to_rotvec_dot_matrix(rotvecs):
"""Compute matrices to transform angular rates to rot. vector derivatives.
The matrices depend on the current attitude represented as a rotation
vector.
Parameters
----------
rotvecs : ndarray, shape (n, 3)
Set of rotation vectors.
Returns
-------
ndarray, shape (n, 3, 3)
"""
norm = np.linalg.norm(rotvecs, axis=1)
k = np.empty_like(norm)
mask = norm > 1e-4
nm = norm[mask]
k[mask] = (1 - 0.5 * nm / np.tan(0.5 * nm)) / nm**2
mask = ~mask
nm = norm[mask]
k[mask] = 1/12 + 1/720 * nm**2
skew = _create_skew_matrix(rotvecs)
result = np.empty((len(rotvecs), 3, 3))
result[:] = np.identity(3)
result[:] += 0.5 * skew
result[:] += k[:, None, None] * np.matmul(skew, skew)
return result
def _rotvec_dot_to_angular_rate_matrix(rotvecs):
"""Compute matrices to transform rot. vector derivatives to angular rates.
The matrices depend on the current attitude represented as a rotation
vector.
Parameters
----------
rotvecs : ndarray, shape (n, 3)
Set of rotation vectors.
Returns
-------
ndarray, shape (n, 3, 3)
"""
norm = np.linalg.norm(rotvecs, axis=1)
k1 = np.empty_like(norm)
k2 = np.empty_like(norm)
mask = norm > 1e-4
nm = norm[mask]
k1[mask] = (1 - np.cos(nm)) / nm ** 2
k2[mask] = (nm - np.sin(nm)) / nm ** 3
mask = ~mask
nm = norm[mask]
k1[mask] = 0.5 - nm ** 2 / 24
k2[mask] = 1 / 6 - nm ** 2 / 120
skew = _create_skew_matrix(rotvecs)
result = np.empty((len(rotvecs), 3, 3))
result[:] = np.identity(3)
result[:] -= k1[:, None, None] * skew
result[:] += k2[:, None, None] * np.matmul(skew, skew)
return result
def _angular_acceleration_nonlinear_term(rotvecs, rotvecs_dot):
"""Compute the non-linear term in angular acceleration.
The angular acceleration contains a quadratic term with respect to
the derivative of the rotation vector. This function computes that.
Parameters
----------
rotvecs : ndarray, shape (n, 3)
Set of rotation vectors.
rotvecs_dot: ndarray, shape (n, 3)
Set of rotation vector derivatives.
Returns
-------
ndarray, shape (n, 3)
"""
norm = np.linalg.norm(rotvecs, axis=1)
dp = np.sum(rotvecs * rotvecs_dot, axis=1)
cp = np.cross(rotvecs, rotvecs_dot)
ccp = np.cross(rotvecs, cp)
dccp = np.cross(rotvecs_dot, cp)
k1 = np.empty_like(norm)
k2 = np.empty_like(norm)
k3 = np.empty_like(norm)
mask = norm > 1e-4
nm = norm[mask]
k1[mask] = (-nm * np.sin(nm) - 2 * (np.cos(nm) - 1)) / nm ** 4
k2[mask] = (-2 * nm + 3 * np.sin(nm) - nm * np.cos(nm)) / nm ** 5
k3[mask] = (nm - np.sin(nm)) / nm ** 3
mask = ~mask
nm = norm[mask]
k1[mask] = 1/12 - nm ** 2 / 180
    k2[mask] = -1/60 + nm ** 2 / 1260  # series of (-2*x + 3*sin(x) - x*cos(x)) / x**5
k3[mask] = 1/6 - nm ** 2 / 120
dp = dp[:, None]
k1 = k1[:, None]
k2 = k2[:, None]
k3 = k3[:, None]
return dp * (k1 * cp + k2 * ccp) + k3 * dccp
def _compute_angular_rate(rotvecs, rotvecs_dot):
"""Compute angular rates given rotation vectors and its derivatives.
Parameters
----------
rotvecs : ndarray, shape (n, 3)
Set of rotation vectors.
rotvecs_dot : ndarray, shape (n, 3)
Set of rotation vector derivatives.
Returns
-------
ndarray, shape (n, 3)
"""
return _matrix_vector_product_of_stacks(
_rotvec_dot_to_angular_rate_matrix(rotvecs), rotvecs_dot)
def _compute_angular_acceleration(rotvecs, rotvecs_dot, rotvecs_dot_dot):
"""Compute angular acceleration given rotation vector and its derivatives.
Parameters
----------
rotvecs : ndarray, shape (n, 3)
Set of rotation vectors.
rotvecs_dot : ndarray, shape (n, 3)
Set of rotation vector derivatives.
rotvecs_dot_dot : ndarray, shape (n, 3)
Set of rotation vector second derivatives.
Returns
-------
ndarray, shape (n, 3)
"""
return (_compute_angular_rate(rotvecs, rotvecs_dot_dot) +
_angular_acceleration_nonlinear_term(rotvecs, rotvecs_dot))
def _create_block_3_diagonal_matrix(A, B, d):
"""Create a 3-diagonal block matrix as banded.
The matrix has the following structure:
DB...
ADB..
.ADB.
..ADB
...AD
The blocks A, B and D are 3-by-3 matrices. The D matrices has the form
d * I.
Parameters
----------
A : ndarray, shape (n, 3, 3)
Stack of A blocks.
B : ndarray, shape (n, 3, 3)
Stack of B blocks.
d : ndarray, shape (n + 1,)
Values for diagonal blocks.
Returns
-------
ndarray, shape (11, 3 * (n + 1))
Matrix in the banded form as used by `scipy.linalg.solve_banded`.
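    Examples
    --------
    A shape check for a small illustrative input (two A/B blocks, hence
    three diagonal blocks), in the banded layout expected by
    `scipy.linalg.solve_banded`:
    >>> import numpy as np
    >>> A = np.zeros((2, 3, 3))
    >>> B = np.zeros((2, 3, 3))
    >>> d = np.ones(3)
    >>> _create_block_3_diagonal_matrix(A, B, d).shape
    (11, 9)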
"""
ind = np.arange(3)
ind_blocks = np.arange(len(A))
A_i = np.empty_like(A, dtype=int)
A_i[:] = ind[:, None]
A_i += 3 * (1 + ind_blocks[:, None, None])
A_j = np.empty_like(A, dtype=int)
A_j[:] = ind
A_j += 3 * ind_blocks[:, None, None]
B_i = np.empty_like(B, dtype=int)
B_i[:] = ind[:, None]
B_i += 3 * ind_blocks[:, None, None]
B_j = np.empty_like(B, dtype=int)
B_j[:] = ind
B_j += 3 * (1 + ind_blocks[:, None, None])
diag_i = diag_j = np.arange(3 * len(d))
i = np.hstack((A_i.ravel(), B_i.ravel(), diag_i))
j = np.hstack((A_j.ravel(), B_j.ravel(), diag_j))
values = np.hstack((A.ravel(), B.ravel(), np.repeat(d, 3)))
u = 5
l = 5
result = np.zeros((u + l + 1, 3 * len(d)))
result[u + i - j, j] = values
return result
class RotationSpline:
"""Interpolate rotations with continuous angular rate and acceleration.
The rotation vectors between each consecutive orientation are cubic
functions of time and it is guaranteed that angular rate and acceleration
    are continuous. Such interpolation is analogous to cubic spline
interpolation.
Refer to [1]_ for math and implementation details.
Parameters
----------
times : array_like, shape (N,)
Times of the known rotations. At least 2 times must be specified.
rotations : `Rotation` instance
Rotations to perform the interpolation between. Must contain N
rotations.
Methods
-------
__call__
References
----------
.. [1] `Smooth Attitude Interpolation
<https://github.com/scipy/scipy/files/2932755/attitude_interpolation.pdf>`_
Examples
--------
>>> from scipy.spatial.transform import Rotation, RotationSpline
Define the sequence of times and rotations from the Euler angles:
>>> times = [0, 10, 20, 40]
>>> angles = [[-10, 20, 30], [0, 15, 40], [-30, 45, 30], [20, 45, 90]]
>>> rotations = Rotation.from_euler('XYZ', angles, degrees=True)
Create the interpolator object:
>>> spline = RotationSpline(times, rotations)
Interpolate the Euler angles, angular rate and acceleration:
>>> angular_rate = np.rad2deg(spline(times, 1))
>>> angular_acceleration = np.rad2deg(spline(times, 2))
>>> times_plot = np.linspace(times[0], times[-1], 100)
>>> angles_plot = spline(times_plot).as_euler('XYZ', degrees=True)
>>> angular_rate_plot = np.rad2deg(spline(times_plot, 1))
>>> angular_acceleration_plot = np.rad2deg(spline(times_plot, 2))
On this plot you see that Euler angles are continuous and smooth:
>>> import matplotlib.pyplot as plt
>>> plt.plot(times_plot, angles_plot)
>>> plt.plot(times, angles, 'x')
>>> plt.title("Euler angles")
>>> plt.show()
The angular rate is also smooth:
>>> plt.plot(times_plot, angular_rate_plot)
>>> plt.plot(times, angular_rate, 'x')
>>> plt.title("Angular rate")
>>> plt.show()
The angular acceleration is continuous, but not smooth. Also note that
the angular acceleration is not a piecewise-linear function, because
it is different from the second derivative of the rotation vector (which
is a piecewise-linear function as in the cubic spline).
>>> plt.plot(times_plot, angular_acceleration_plot)
>>> plt.plot(times, angular_acceleration, 'x')
>>> plt.title("Angular acceleration")
>>> plt.show()
"""
# Parameters for the solver for angular rate.
MAX_ITER = 10
TOL = 1e-9
def _solve_for_angular_rates(self, dt, angular_rates, rotvecs):
angular_rate_first = angular_rates[0].copy()
A = _angular_rate_to_rotvec_dot_matrix(rotvecs)
A_inv = _rotvec_dot_to_angular_rate_matrix(rotvecs)
M = _create_block_3_diagonal_matrix(
2 * A_inv[1:-1] / dt[1:-1, None, None],
2 * A[1:-1] / dt[1:-1, None, None],
4 * (1 / dt[:-1] + 1 / dt[1:]))
b0 = 6 * (rotvecs[:-1] * dt[:-1, None] ** -2 +
rotvecs[1:] * dt[1:, None] ** -2)
b0[0] -= 2 / dt[0] * A_inv[0].dot(angular_rate_first)
b0[-1] -= 2 / dt[-1] * A[-1].dot(angular_rates[-1])
for iteration in range(self.MAX_ITER):
rotvecs_dot = _matrix_vector_product_of_stacks(A, angular_rates)
delta_beta = _angular_acceleration_nonlinear_term(
rotvecs[:-1], rotvecs_dot[:-1])
b = b0 - delta_beta
angular_rates_new = solve_banded((5, 5), M, b.ravel())
angular_rates_new = angular_rates_new.reshape((-1, 3))
delta = np.abs(angular_rates_new - angular_rates[:-1])
angular_rates[:-1] = angular_rates_new
if np.all(delta < self.TOL * (1 + np.abs(angular_rates_new))):
break
rotvecs_dot = _matrix_vector_product_of_stacks(A, angular_rates)
angular_rates = np.vstack((angular_rate_first, angular_rates[:-1]))
return angular_rates, rotvecs_dot
def __init__(self, times, rotations):
from scipy.interpolate import PPoly
if rotations.single:
raise ValueError("`rotations` must be a sequence of rotations.")
if len(rotations) == 1:
raise ValueError("`rotations` must contain at least 2 rotations.")
times = np.asarray(times, dtype=float)
if times.ndim != 1:
raise ValueError("`times` must be 1-dimensional.")
if len(times) != len(rotations):
raise ValueError("Expected number of rotations to be equal to "
"number of timestamps given, got {} rotations "
"and {} timestamps."
.format(len(rotations), len(times)))
dt = np.diff(times)
if np.any(dt <= 0):
raise ValueError("Values in `times` must be in a strictly "
"increasing order.")
rotvecs = (rotations[:-1].inv() * rotations[1:]).as_rotvec()
angular_rates = rotvecs / dt[:, None]
if len(rotations) == 2:
rotvecs_dot = angular_rates
else:
angular_rates, rotvecs_dot = self._solve_for_angular_rates(
dt, angular_rates, rotvecs)
dt = dt[:, None]
coeff = np.empty((4, len(times) - 1, 3))
coeff[0] = (-2 * rotvecs + dt * angular_rates
+ dt * rotvecs_dot) / dt ** 3
coeff[1] = (3 * rotvecs - 2 * dt * angular_rates
- dt * rotvecs_dot) / dt ** 2
coeff[2] = angular_rates
coeff[3] = 0
self.times = times
self.rotations = rotations
self.interpolator = PPoly(coeff, times)
def __call__(self, times, order=0):
"""Compute interpolated values.
Parameters
----------
times : float or array_like
Times of interest.
order : {0, 1, 2}, optional
Order of differentiation:
* 0 (default) : return Rotation
* 1 : return the angular rate in rad/sec
* 2 : return the angular acceleration in rad/sec/sec
Returns
-------
Interpolated Rotation, angular rate or acceleration.
"""
if order not in [0, 1, 2]:
raise ValueError("`order` must be 0, 1 or 2.")
times = np.asarray(times, dtype=float)
if times.ndim > 1:
raise ValueError("`times` must be at most 1-dimensional.")
        single_time = times.ndim == 0
times = np.atleast_1d(times)
rotvecs = self.interpolator(times)
if order == 0:
index = np.searchsorted(self.times, times, side='right')
index -= 1
index[index < 0] = 0
n_segments = len(self.times) - 1
index[index > n_segments - 1] = n_segments - 1
result = self.rotations[index] * Rotation.from_rotvec(rotvecs)
elif order == 1:
rotvecs_dot = self.interpolator(times, 1)
result = _compute_angular_rate(rotvecs, rotvecs_dot)
elif order == 2:
rotvecs_dot = self.interpolator(times, 1)
rotvecs_dot_dot = self.interpolator(times, 2)
result = _compute_angular_acceleration(rotvecs, rotvecs_dot,
rotvecs_dot_dot)
else:
assert False
        if single_time:
result = result[0]
return result
| bsd-3-clause |